// SPDX-License-Identifier: GPL-2.0
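/*
 * udmabuf: export memfd (shmem) backed memory as a dma-buf.
 *
 * Userspace opens the "udmabuf" misc device and issues UDMABUF_CREATE or
 * UDMABUF_CREATE_LIST ioctls.  Each request names one or more page-aligned
 * regions of sealed memfds; the driver takes references on the backing shmem
 * pages and returns a dma-buf file descriptor that exports them.
 */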
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

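/*
 * Per-buffer state: the array of shmem pages (each with a reference held)
 * backing the dma-buf, an optional scatter-gather table mapped against the
 * misc device (built on first CPU access and reused until release), and the
 * misc device the buffer was created from.
 */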
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

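/*
 * Build a scatter-gather table covering all of the buffer's pages and DMA-map
 * it for @dev.  Used both for device attachments (map_udmabuf) and for the
 * cached mapping that backs begin/end_cpu_access.
 */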
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.vmap = vmap_udmabuf,
	.vunmap = vunmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

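/*
 * Only memfds with the right seals are accepted: F_SEAL_SHRINK must be set so
 * the backing file cannot be truncated while its pages are exported, and
 * F_SEAL_WRITE must not be set, since the exported dma-buf would still be
 * writable and defeat that seal.
 */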
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");