// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	if (bo->vaddr)
		v3d_put_bo_vaddr(bo);

	v3d_mmu_remove_ptes(bo);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> V3D_MMU_PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	/* GPU execution may have dirtied any pages in the BO. */
	bo->base.pages_mark_dirty_on_put = true;

	drm_gem_shmem_free(&bo->base);
}

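/* All BO operations other than free are delegated straight to the
 * generic shmem helpers; free is overridden so v3d can tear down the
 * BO's GPU MMU mappings and address-space reservation as well.
 */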
static const struct drm_gem_object_funcs v3d_gem_funcs = {
	.free = v3d_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/* gem_create_object function for allocating a BO struct and doing
 * early setup.
 */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
	struct v3d_bo *bo;
	struct drm_gem_object *obj;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);
	obj = &bo->base.base;

	obj->funcs = &v3d_gem_funcs;
	bo->base.map_wc = true;
	INIT_LIST_HEAD(&bo->unref_head);

	return &bo->base.base;
}

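/* Setup shared by freshly allocated and dma-buf imported BOs: pin the
 * backing pages, reserve a region of the GPU's address space for the
 * BO's lifetime, and write the page table entries.
 */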
static int
v3d_bo_create_finish(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);
	struct sg_table *sgt;
	int ret;

	/* So far we pin the BO in the MMU for its lifetime, so use
	 * shmem's helper for getting a lifetime sgt.
	 */
	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	spin_lock(&v3d->mm_lock);
	/* Allocate the object's space in the GPU's page tables.
	 * Inserting PTEs will happen later, but the offset is for the
	 * lifetime of the BO.
	 */
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> V3D_MMU_PAGE_SHIFT,
					 GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		return ret;

	/* Track stats for /debug/dri/n/bo_stats. */
	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> V3D_MMU_PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	v3d_mmu_insert_ptes(bo);

	return 0;
}

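/* Allocates a shmem-backed BO (the shmem helper page-aligns the size)
 * and binds it into the GPU's virtual address space via
 * v3d_bo_create_finish().
 */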
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct drm_gem_shmem_object *shmem_obj;
	struct v3d_bo *bo;
	int ret;

	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);
	bo = to_v3d_bo(&shmem_obj->base);
	bo->vaddr = NULL;

	ret = v3d_bo_create_finish(&shmem_obj->base);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	drm_gem_shmem_free(shmem_obj);
	return ERR_PTR(ret);
}

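/* dma-buf import path: wrap the incoming sg_table in a shmem object,
 * then reserve GPU address space and insert PTEs just as for a
 * natively allocated BO.
 */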
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	ret = v3d_bo_create_finish(obj);
	if (ret) {
		drm_gem_shmem_free(&to_v3d_bo(obj)->base);
		return ERR_PTR(ret);
	}

	return obj;
}

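/* Maps the BO's pages into a contiguous, write-combined kernel virtual
 * address range for CPU access from the kernel. vmap() returns NULL on
 * failure, so bo->vaddr should be checked before use.
 */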
void v3d_get_bo_vaddr(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *obj = &bo->base;

	bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
			 pgprot_writecombine(PAGE_KERNEL));
}

void v3d_put_bo_vaddr(struct v3d_bo *bo)
{
	vunmap(bo->vaddr);
	bo->vaddr = NULL;
}

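/* Userspace entry point for allocating a BO. A rough sketch of how a
 * client drives this ioctl and v3d_mmap_bo_ioctl() below (assuming
 * libdrm's drmIoctl() and an fd opened on the v3d node; error handling
 * omitted):
 *
 *	struct drm_v3d_create_bo create = { .size = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *
 *	struct drm_v3d_mmap_bo map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */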
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_v3d_create_bo *args = data;
	struct v3d_bo *bo = NULL;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

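/* Returns the fake mmap offset for the BO; userspace passes it as the
 * offset argument to mmap() on the DRM fd. No mapping is set up here.
 */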
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_v3d_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	drm_gem_object_put(gem_obj);

	return 0;
}

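/* Reports the BO's location in the GPU's virtual address space, which
 * was fixed for the BO's lifetime by v3d_bo_create_finish().
 */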
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_v3d_get_bo_offset *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_v3d_bo(gem_obj);

	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;

	drm_gem_object_put(gem_obj);
	return 0;
}

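/* Waits up to args->timeout_ns for all fences on the BO's reservation
 * object (readers and writers) to signal, updating the remaining
 * timeout in case the ioctl is restarted.
 */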
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffy/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}