// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN) /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
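
/*
 * Illustrative usage sketch (not part of this file's logic): a hypothetical
 * driver would allocate a device buffer and release it again with
 * snd_dma_free_pages().  Names such as "chip->dev" are placeholders.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				      DMA_TO_DEVICE, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	(use buf.area as the CPU address and buf.addr as the DMA address)
 *	snd_dma_free_pages(&buf);
 */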

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
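
/*
 * Note on the fallback semantics (illustrative, placeholder names): the
 * caller must check dmab->bytes after a successful return, since the
 * allocated size may be smaller than requested, e.g.:
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					  1024 * 1024, &buf))
 *		actual_size = buf.bytes;	(may be less than 1 MiB)
 */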

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically when the device is removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
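
/*
 * Illustrative devres usage (assumed driver context, not part of this file):
 * in a probe callback there is no need for an explicit free, since the
 * buffer is released together with the device.  "pci" and "bufsize" are
 * placeholders.
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pci->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_BIDIRECTIONAL, bufsize);
 *	if (!buf)
 *		return -ENOMEM;
 */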

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
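
/*
 * Illustrative sync usage (placeholder names): for buffer types that set
 * dev.need_sync (e.g. non-coherent allocations), a driver syncs toward the
 * device after the CPU has written samples, and toward the CPU before
 * reading captured data:
 *
 *	memcpy(dmab->area + ofs, samples, bytes);
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	memcpy(samples, dmab->area + ofs, bytes);
 *
 * For coherent buffer types need_sync is false and the call returns early.
 */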

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
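
/*
 * Illustrative walk over a (possibly non-contiguous) buffer using the
 * helpers above; "program_dma()" is a placeholder for hardware-specific
 * code:
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *		program_dma(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *	}
 */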

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}


static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if we fail to allocate from it, fall back to the normal
	 * page allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

#ifdef CONFIG_SND_DMA_SGBUF
	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
	if (!sgt && !get_dma_ops(dmab->dev.dev))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/* DMA address array; the entry for the first page of each chunk
	 * keeps the chunk's page count in its low (~PAGE_MASK) bits
	 */
	dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	/* correct the type */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}