// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void *virt_base;		/* kernel virtual address of the pool */
	dma_addr_t device_base;		/* device-side (DMA) base address */
	unsigned long pfn_base;		/* first PFN of the backing memory */
	int size;			/* pool size in pages */
	unsigned long *bitmap;		/* one bit per page, set when allocated */
	spinlock_t spinlock;		/* protects @bitmap */
	bool use_dev_dma_pfn_offset;	/* derive DMA base via phys_to_dma() */
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}
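
/*
 * Worked example (hypothetical numbers, for illustration only): for a pool
 * whose backing memory sits at CPU physical address 0x48000000 behind a bus
 * that subtracts 0x40000000 (a "dma-ranges" translation), phys_to_dma()
 * above would yield a device base of 0x08000000. Without the pfn offset
 * flag, the device_base supplied at init time is handed back unchanged.
 */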

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be remapped with memremap() so the CPU can access the
 * region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		_dma_release_coherent_memory(mem);
	return ret;
}
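
/*
 * Example (a sketch, not part of this file): a platform driver could
 * dedicate a fixed 1 MiB window to its device. All names below (my_probe,
 * pdev, MY_POOL_BASE) are assumptions made for illustration:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, MY_POOL_BASE,
 *						  MY_POOL_BASE, SZ_1M);
 *		if (ret)
 *			return ret;
 *		// dma_alloc_coherent(&pdev->dev, ...) now draws from this pool
 *		return 0;
 *	}
 */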

void dma_release_coherent_memory(struct device *dev)
{
	if (dev)
		_dma_release_coherent_memory(dev->dma_mem);
}

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	 of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
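
/*
 * Usage sketch (assumed caller, for illustration only): an allocator on the
 * dma_alloc_coherent() path would try the per-device pool first and fall
 * back to the generic allocator only when no pool is assigned:
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, &dma_handle, &vaddr))
 *		return vaddr;	// pool present; may still be NULL on failure
 *	// no per-device pool: continue with the generic allocation path
 *
 * Note that a present pool captures the allocation: a return of 1 with a
 * NULL vaddr means failure, not "try elsewhere".
 */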

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
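
/*
 * Usage sketch (assumed caller, for illustration only): the free path
 * mirrors the allocation path above; @order must match what the allocation
 * used, i.e. get_order(size):
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *		return;		// buffer came from the per-device pool
 *	// otherwise release through the generic DMA free path
 */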

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
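
/*
 * Usage sketch (assumed caller, for illustration only): a dma_mmap-style
 * implementation would give the per-device pool first refusal:
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;	// pool handled it (possibly with an error)
 *	// fall through to the generic mmap path
 *
 * A return of 1 with *ret == -ENXIO means the vma's pgoff/size did not fit
 * inside the buffer, so the caller must not retry elsewhere.
 */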

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
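
/*
 * Note on the global pool (illustrative summary): with CONFIG_DMA_GLOBAL_POOL
 * enabled, devices without a dedicated dma_mem fall back to this single
 * shared area. A platform could set it up early, e.g.:
 *
 *	ret = dma_init_global_coherent(pool_phys, pool_size);
 *
 * where pool_phys and pool_size are assumed platform-specific values; the
 * device tree path below does this automatically for a region carrying the
 * "linux,dma-default" property.
 */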

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init = rmem_dma_device_init,
	.device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
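
/*
 * Device tree sketch (assumed bindings usage; node name and addresses are
 * examples only): a reserved-memory node compatible with "shared-dma-pool"
 * that is not "reusable", and that carries "no-map" on ARM, is picked up by
 * rmem_dma_setup() above:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@48000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x48000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A device then references the region with "memory-region = <&dma_pool>;",
 * which causes rmem_dma_device_init() to run when the device is bound.
 */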

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif