1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * KUnit test suite for GEM objects backed by shmem buffers |
4 | * |
5 | * Copyright (C) 2023 Red Hat, Inc. |
6 | * |
7 | * Author: Marco Pagani <marpagan@redhat.com> |
8 | */ |
9 | |
10 | #include <linux/dma-buf.h> |
11 | #include <linux/iosys-map.h> |
12 | #include <linux/sizes.h> |
13 | |
14 | #include <kunit/test.h> |
15 | |
16 | #include <drm/drm_device.h> |
17 | #include <drm/drm_drv.h> |
18 | #include <drm/drm_gem.h> |
19 | #include <drm/drm_gem_shmem_helper.h> |
20 | #include <drm/drm_kunit_helpers.h> |
21 | |
22 | #define TEST_SIZE SZ_1M |
23 | #define TEST_BYTE 0xae |
24 | |
25 | /* |
26 | * Wrappers to avoid an explicit type casting when passing action |
27 | * functions to kunit_add_action(). |
28 | */ |
29 | static void kfree_wrapper(void *ptr) |
30 | { |
31 | const void *obj = ptr; |
32 | |
33 | kfree(objp: obj); |
34 | } |
35 | |
/* Deferred-action adapter: release the entries of a scatter/gather table. */
static void sg_free_table_wrapper(void *ptr)
{
	struct sg_table *table = ptr;

	sg_free_table(table);
}
42 | |
/* Deferred-action adapter: free a shmem-backed GEM object. */
static void drm_gem_shmem_free_wrapper(void *ptr)
{
	struct drm_gem_shmem_object *obj = ptr;

	drm_gem_shmem_free(obj);
}
49 | |
50 | /* |
51 | * Test creating a shmem GEM object backed by shmem buffer. The test |
52 | * case succeeds if the GEM object is successfully allocated with the |
53 | * shmem file node and object functions attributes set, and the size |
54 | * attribute is equal to the correct size. |
55 | */ |
56 | static void drm_gem_shmem_test_obj_create(struct kunit *test) |
57 | { |
58 | struct drm_device *drm_dev = test->priv; |
59 | struct drm_gem_shmem_object *shmem; |
60 | |
61 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
62 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
63 | KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE); |
64 | KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp); |
65 | KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs); |
66 | |
67 | drm_gem_shmem_free(shmem); |
68 | } |
69 | |
70 | /* |
71 | * Test creating a shmem GEM object from a scatter/gather table exported |
72 | * via a DMA-BUF. The test case succeed if the GEM object is successfully |
73 | * created with the shmem file node attribute equal to NULL and the sgt |
74 | * attribute pointing to the scatter/gather table that has been imported. |
75 | */ |
76 | static void drm_gem_shmem_test_obj_create_private(struct kunit *test) |
77 | { |
78 | struct drm_device *drm_dev = test->priv; |
79 | struct drm_gem_shmem_object *shmem; |
80 | struct drm_gem_object *gem_obj; |
81 | struct dma_buf buf_mock; |
82 | struct dma_buf_attachment attach_mock; |
83 | struct sg_table *sgt; |
84 | char *buf; |
85 | int ret; |
86 | |
87 | /* Create a mock scatter/gather table */ |
88 | buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL); |
89 | KUNIT_ASSERT_NOT_NULL(test, buf); |
90 | |
91 | sgt = kzalloc(size: sizeof(*sgt), GFP_KERNEL); |
92 | KUNIT_ASSERT_NOT_NULL(test, sgt); |
93 | |
94 | ret = kunit_add_action_or_reset(test, action: kfree_wrapper, ctx: sgt); |
95 | KUNIT_ASSERT_EQ(test, ret, 0); |
96 | |
97 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
98 | KUNIT_ASSERT_EQ(test, ret, 0); |
99 | |
100 | ret = kunit_add_action_or_reset(test, action: sg_free_table_wrapper, ctx: sgt); |
101 | KUNIT_ASSERT_EQ(test, ret, 0); |
102 | |
103 | sg_init_one(sgt->sgl, buf, TEST_SIZE); |
104 | |
105 | /* Init a mock DMA-BUF */ |
106 | buf_mock.size = TEST_SIZE; |
107 | attach_mock.dmabuf = &buf_mock; |
108 | |
109 | gem_obj = drm_gem_shmem_prime_import_sg_table(dev: drm_dev, attach: &attach_mock, sgt); |
110 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj); |
111 | KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE); |
112 | KUNIT_EXPECT_NULL(test, gem_obj->filp); |
113 | KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs); |
114 | |
115 | /* The scatter/gather table will be freed by drm_gem_shmem_free */ |
116 | kunit_remove_action(test, action: sg_free_table_wrapper, ctx: sgt); |
117 | kunit_remove_action(test, action: kfree_wrapper, ctx: sgt); |
118 | |
119 | shmem = to_drm_gem_shmem_obj(gem_obj); |
120 | KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt); |
121 | |
122 | drm_gem_shmem_free(shmem); |
123 | } |
124 | |
125 | /* |
126 | * Test pinning backing pages for a shmem GEM object. The test case |
127 | * succeeds if a suitable number of backing pages are allocated, and |
128 | * the pages table counter attribute is increased by one. |
129 | */ |
130 | static void drm_gem_shmem_test_pin_pages(struct kunit *test) |
131 | { |
132 | struct drm_device *drm_dev = test->priv; |
133 | struct drm_gem_shmem_object *shmem; |
134 | int i, ret; |
135 | |
136 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
137 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
138 | KUNIT_EXPECT_NULL(test, shmem->pages); |
139 | KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); |
140 | |
141 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
142 | KUNIT_ASSERT_EQ(test, ret, 0); |
143 | |
144 | ret = drm_gem_shmem_pin(shmem); |
145 | KUNIT_ASSERT_EQ(test, ret, 0); |
146 | KUNIT_ASSERT_NOT_NULL(test, shmem->pages); |
147 | KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); |
148 | |
149 | for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++) |
150 | KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]); |
151 | |
152 | drm_gem_shmem_unpin(shmem); |
153 | KUNIT_EXPECT_NULL(test, shmem->pages); |
154 | KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); |
155 | } |
156 | |
157 | /* |
158 | * Test creating a virtual mapping for a shmem GEM object. The test |
159 | * case succeeds if the backing memory is mapped and the reference |
160 | * counter for virtual mapping is increased by one. Moreover, the test |
161 | * case writes and then reads a test pattern over the mapped memory. |
162 | */ |
163 | static void drm_gem_shmem_test_vmap(struct kunit *test) |
164 | { |
165 | struct drm_device *drm_dev = test->priv; |
166 | struct drm_gem_shmem_object *shmem; |
167 | struct iosys_map map; |
168 | int ret, i; |
169 | |
170 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
171 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
172 | KUNIT_EXPECT_NULL(test, shmem->vaddr); |
173 | KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); |
174 | |
175 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
176 | KUNIT_ASSERT_EQ(test, ret, 0); |
177 | |
178 | ret = drm_gem_shmem_vmap(shmem, map: &map); |
179 | KUNIT_ASSERT_EQ(test, ret, 0); |
180 | KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr); |
181 | KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map)); |
182 | KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1); |
183 | |
184 | iosys_map_memset(dst: &map, offset: 0, TEST_BYTE, TEST_SIZE); |
185 | for (i = 0; i < TEST_SIZE; i++) |
186 | KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE); |
187 | |
188 | drm_gem_shmem_vunmap(shmem, map: &map); |
189 | KUNIT_EXPECT_NULL(test, shmem->vaddr); |
190 | KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); |
191 | } |
192 | |
193 | /* |
194 | * Test exporting a scatter/gather table of pinned pages suitable for |
195 | * PRIME usage from a shmem GEM object. The test case succeeds if a |
196 | * scatter/gather table large enough to accommodate the backing memory |
197 | * is successfully exported. |
198 | */ |
199 | static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test) |
200 | { |
201 | struct drm_device *drm_dev = test->priv; |
202 | struct drm_gem_shmem_object *shmem; |
203 | struct sg_table *sgt; |
204 | struct scatterlist *sg; |
205 | unsigned int si, len = 0; |
206 | int ret; |
207 | |
208 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
209 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
210 | |
211 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
212 | KUNIT_ASSERT_EQ(test, ret, 0); |
213 | |
214 | ret = drm_gem_shmem_pin(shmem); |
215 | KUNIT_ASSERT_EQ(test, ret, 0); |
216 | |
217 | sgt = drm_gem_shmem_get_sg_table(shmem); |
218 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); |
219 | KUNIT_EXPECT_NULL(test, shmem->sgt); |
220 | |
221 | ret = kunit_add_action_or_reset(test, action: sg_free_table_wrapper, ctx: sgt); |
222 | KUNIT_ASSERT_EQ(test, ret, 0); |
223 | |
224 | for_each_sgtable_sg(sgt, sg, si) { |
225 | KUNIT_EXPECT_NOT_NULL(test, sg); |
226 | len += sg->length; |
227 | } |
228 | |
229 | KUNIT_EXPECT_GE(test, len, TEST_SIZE); |
230 | } |
231 | |
232 | /* |
233 | * Test pinning pages and exporting a scatter/gather table suitable for |
234 | * driver usage from a shmem GEM object. The test case succeeds if the |
235 | * backing pages are pinned and a scatter/gather table large enough to |
236 | * accommodate the backing memory is successfully exported. |
237 | */ |
238 | static void drm_gem_shmem_test_get_sg_table(struct kunit *test) |
239 | { |
240 | struct drm_device *drm_dev = test->priv; |
241 | struct drm_gem_shmem_object *shmem; |
242 | struct sg_table *sgt; |
243 | struct scatterlist *sg; |
244 | unsigned int si, ret, len = 0; |
245 | |
246 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
247 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
248 | |
249 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
250 | KUNIT_ASSERT_EQ(test, ret, 0); |
251 | |
252 | /* The scatter/gather table will be freed by drm_gem_shmem_free */ |
253 | sgt = drm_gem_shmem_get_pages_sgt(shmem); |
254 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); |
255 | KUNIT_ASSERT_NOT_NULL(test, shmem->pages); |
256 | KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); |
257 | KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt); |
258 | |
259 | for_each_sgtable_sg(sgt, sg, si) { |
260 | KUNIT_EXPECT_NOT_NULL(test, sg); |
261 | len += sg->length; |
262 | } |
263 | |
264 | KUNIT_EXPECT_GE(test, len, TEST_SIZE); |
265 | } |
266 | |
267 | /* |
268 | * Test updating the madvise state of a shmem GEM object. The test |
269 | * case checks that the function for setting madv updates it only if |
270 | * its current value is greater or equal than zero and returns false |
271 | * if it has a negative value. |
272 | */ |
273 | static void drm_gem_shmem_test_madvise(struct kunit *test) |
274 | { |
275 | struct drm_device *drm_dev = test->priv; |
276 | struct drm_gem_shmem_object *shmem; |
277 | int ret; |
278 | |
279 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
280 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
281 | KUNIT_ASSERT_EQ(test, shmem->madv, 0); |
282 | |
283 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
284 | KUNIT_ASSERT_EQ(test, ret, 0); |
285 | |
286 | ret = drm_gem_shmem_madvise(shmem, madv: 1); |
287 | KUNIT_EXPECT_TRUE(test, ret); |
288 | KUNIT_ASSERT_EQ(test, shmem->madv, 1); |
289 | |
290 | /* Set madv to a negative value */ |
291 | ret = drm_gem_shmem_madvise(shmem, madv: -1); |
292 | KUNIT_EXPECT_FALSE(test, ret); |
293 | KUNIT_ASSERT_EQ(test, shmem->madv, -1); |
294 | |
295 | /* Check that madv cannot be set back to a positive value */ |
296 | ret = drm_gem_shmem_madvise(shmem, madv: 0); |
297 | KUNIT_EXPECT_FALSE(test, ret); |
298 | KUNIT_ASSERT_EQ(test, shmem->madv, -1); |
299 | } |
300 | |
301 | /* |
302 | * Test purging a shmem GEM object. First, assert that a newly created |
303 | * shmem GEM object is not purgeable. Then, set madvise to a positive |
304 | * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the |
305 | * backing pages. Finally, assert that the shmem GEM object is now |
306 | * purgeable and purge it. |
307 | */ |
308 | static void drm_gem_shmem_test_purge(struct kunit *test) |
309 | { |
310 | struct drm_device *drm_dev = test->priv; |
311 | struct drm_gem_shmem_object *shmem; |
312 | struct sg_table *sgt; |
313 | int ret; |
314 | |
315 | shmem = drm_gem_shmem_create(dev: drm_dev, TEST_SIZE); |
316 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); |
317 | |
318 | ret = kunit_add_action_or_reset(test, action: drm_gem_shmem_free_wrapper, ctx: shmem); |
319 | KUNIT_ASSERT_EQ(test, ret, 0); |
320 | |
321 | ret = drm_gem_shmem_is_purgeable(shmem); |
322 | KUNIT_EXPECT_FALSE(test, ret); |
323 | |
324 | ret = drm_gem_shmem_madvise(shmem, madv: 1); |
325 | KUNIT_EXPECT_TRUE(test, ret); |
326 | |
327 | /* The scatter/gather table will be freed by drm_gem_shmem_free */ |
328 | sgt = drm_gem_shmem_get_pages_sgt(shmem); |
329 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); |
330 | |
331 | ret = drm_gem_shmem_is_purgeable(shmem); |
332 | KUNIT_EXPECT_TRUE(test, ret); |
333 | |
334 | drm_gem_shmem_purge(shmem); |
335 | KUNIT_EXPECT_NULL(test, shmem->pages); |
336 | KUNIT_EXPECT_NULL(test, shmem->sgt); |
337 | KUNIT_EXPECT_EQ(test, shmem->madv, -1); |
338 | } |
339 | |
340 | static int drm_gem_shmem_test_init(struct kunit *test) |
341 | { |
342 | struct device *dev; |
343 | struct drm_device *drm_dev; |
344 | |
345 | /* Allocate a parent device */ |
346 | dev = drm_kunit_helper_alloc_device(test); |
347 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); |
348 | |
349 | /* |
350 | * The DRM core will automatically initialize the GEM core and create |
351 | * a DRM Memory Manager object which provides an address space pool |
352 | * for GEM objects allocation. |
353 | */ |
354 | drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, size: sizeof(*drm_dev), |
355 | offset: 0, features: DRIVER_GEM); |
356 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev); |
357 | |
358 | test->priv = drm_dev; |
359 | |
360 | return 0; |
361 | } |
362 | |
/* Test cases are run in declaration order by the KUnit framework. */
static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};
374 | |
375 | static struct kunit_suite drm_gem_shmem_suite = { |
376 | .name = "drm_gem_shmem" , |
377 | .init = drm_gem_shmem_test_init, |
378 | .test_cases = drm_gem_shmem_test_cases |
379 | }; |
380 | |
381 | kunit_test_suite(drm_gem_shmem_suite); |
382 | |
383 | MODULE_LICENSE("GPL" ); |
384 | |