1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Data Access Monitor Unit Tests |
4 | * |
5 | * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. |
6 | * |
7 | * Author: SeongJae Park <sj@kernel.org> |
8 | */ |
9 | |
10 | #ifdef CONFIG_DAMON_KUNIT_TEST |
11 | |
12 | #ifndef _DAMON_CORE_TEST_H |
13 | #define _DAMON_CORE_TEST_H |
14 | |
15 | #include <kunit/test.h> |
16 | |
17 | static void damon_test_regions(struct kunit *test) |
18 | { |
19 | struct damon_region *r; |
20 | struct damon_target *t; |
21 | |
22 | r = damon_new_region(start: 1, end: 2); |
23 | KUNIT_EXPECT_EQ(test, 1ul, r->ar.start); |
24 | KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); |
25 | KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); |
26 | |
27 | t = damon_new_target(); |
28 | KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); |
29 | |
30 | damon_add_region(r, t); |
31 | KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t)); |
32 | |
33 | damon_destroy_region(r, t); |
34 | KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); |
35 | |
36 | damon_free_target(t); |
37 | } |
38 | |
39 | static unsigned int nr_damon_targets(struct damon_ctx *ctx) |
40 | { |
41 | struct damon_target *t; |
42 | unsigned int nr_targets = 0; |
43 | |
44 | damon_for_each_target(t, ctx) |
45 | nr_targets++; |
46 | |
47 | return nr_targets; |
48 | } |
49 | |
/*
 * Test target addition to, and destruction on, a monitoring context.
 */
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}
66 | |
67 | /* |
68 | * Test kdamond_reset_aggregated() |
69 | * |
70 | * DAMON checks access to each region and aggregates this information as the |
71 | * access frequency of each region. In detail, it increases '->nr_accesses' of |
72 | * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes |
73 | * the aggregated information ('->nr_accesses' of each regions) to the result |
74 | * buffer. As a result of the flushing, the '->nr_accesses' of regions are |
75 | * initialized to zero. |
76 | */ |
77 | static void damon_test_aggregate(struct kunit *test) |
78 | { |
79 | struct damon_ctx *ctx = damon_new_ctx(); |
80 | unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} }; |
81 | unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} }; |
82 | unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} }; |
83 | struct damon_target *t; |
84 | struct damon_region *r; |
85 | int it, ir; |
86 | |
87 | for (it = 0; it < 3; it++) { |
88 | t = damon_new_target(); |
89 | damon_add_target(ctx, t); |
90 | } |
91 | |
92 | it = 0; |
93 | damon_for_each_target(t, ctx) { |
94 | for (ir = 0; ir < 3; ir++) { |
95 | r = damon_new_region(start: saddr[it][ir], end: eaddr[it][ir]); |
96 | r->nr_accesses = accesses[it][ir]; |
97 | r->nr_accesses_bp = accesses[it][ir] * 10000; |
98 | damon_add_region(r, t); |
99 | } |
100 | it++; |
101 | } |
102 | kdamond_reset_aggregated(c: ctx); |
103 | it = 0; |
104 | damon_for_each_target(t, ctx) { |
105 | ir = 0; |
106 | /* '->nr_accesses' should be zeroed */ |
107 | damon_for_each_region(r, t) { |
108 | KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); |
109 | ir++; |
110 | } |
111 | /* regions should be preserved */ |
112 | KUNIT_EXPECT_EQ(test, 3, ir); |
113 | it++; |
114 | } |
115 | /* targets also should be preserved */ |
116 | KUNIT_EXPECT_EQ(test, 3, it); |
117 | |
118 | damon_destroy_ctx(ctx); |
119 | } |
120 | |
121 | static void damon_test_split_at(struct kunit *test) |
122 | { |
123 | struct damon_ctx *c = damon_new_ctx(); |
124 | struct damon_target *t; |
125 | struct damon_region *r, *r_new; |
126 | |
127 | t = damon_new_target(); |
128 | r = damon_new_region(start: 0, end: 100); |
129 | r->nr_accesses_bp = 420000; |
130 | r->nr_accesses = 42; |
131 | r->last_nr_accesses = 15; |
132 | damon_add_region(r, t); |
133 | damon_split_region_at(t, r, sz_r: 25); |
134 | KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); |
135 | KUNIT_EXPECT_EQ(test, r->ar.end, 25ul); |
136 | |
137 | r_new = damon_next_region(r); |
138 | KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul); |
139 | KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul); |
140 | |
141 | KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp); |
142 | KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses); |
143 | KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses); |
144 | |
145 | damon_free_target(t); |
146 | damon_destroy_ctx(ctx: c); |
147 | } |
148 | |
149 | static void damon_test_merge_two(struct kunit *test) |
150 | { |
151 | struct damon_target *t; |
152 | struct damon_region *r, *r2, *r3; |
153 | int i; |
154 | |
155 | t = damon_new_target(); |
156 | r = damon_new_region(start: 0, end: 100); |
157 | r->nr_accesses = 10; |
158 | r->nr_accesses_bp = 100000; |
159 | damon_add_region(r, t); |
160 | r2 = damon_new_region(start: 100, end: 300); |
161 | r2->nr_accesses = 20; |
162 | r2->nr_accesses_bp = 200000; |
163 | damon_add_region(r: r2, t); |
164 | |
165 | damon_merge_two_regions(t, l: r, r: r2); |
166 | KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); |
167 | KUNIT_EXPECT_EQ(test, r->ar.end, 300ul); |
168 | KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u); |
169 | |
170 | i = 0; |
171 | damon_for_each_region(r3, t) { |
172 | KUNIT_EXPECT_PTR_EQ(test, r, r3); |
173 | i++; |
174 | } |
175 | KUNIT_EXPECT_EQ(test, i, 1); |
176 | |
177 | damon_free_target(t); |
178 | } |
179 | |
180 | static struct damon_region *__nth_region_of(struct damon_target *t, int idx) |
181 | { |
182 | struct damon_region *r; |
183 | unsigned int i = 0; |
184 | |
185 | damon_for_each_region(r, t) { |
186 | if (i++ == idx) |
187 | return r; |
188 | } |
189 | |
190 | return NULL; |
191 | } |
192 | |
193 | static void damon_test_merge_regions_of(struct kunit *test) |
194 | { |
195 | struct damon_target *t; |
196 | struct damon_region *r; |
197 | unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184}; |
198 | unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230}; |
199 | unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2}; |
200 | |
201 | unsigned long saddrs[] = {0, 114, 130, 156, 170}; |
202 | unsigned long eaddrs[] = {112, 130, 156, 170, 230}; |
203 | int i; |
204 | |
205 | t = damon_new_target(); |
206 | for (i = 0; i < ARRAY_SIZE(sa); i++) { |
207 | r = damon_new_region(start: sa[i], end: ea[i]); |
208 | r->nr_accesses = nrs[i]; |
209 | r->nr_accesses_bp = nrs[i] * 10000; |
210 | damon_add_region(r, t); |
211 | } |
212 | |
213 | damon_merge_regions_of(t, thres: 9, sz_limit: 9999); |
214 | /* 0-112, 114-130, 130-156, 156-170 */ |
215 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); |
216 | for (i = 0; i < 5; i++) { |
217 | r = __nth_region_of(t, idx: i); |
218 | KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]); |
219 | KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]); |
220 | } |
221 | damon_free_target(t); |
222 | } |
223 | |
/*
 * Test damon_split_regions_of().
 *
 * A region should be split into no more than the requested number of
 * sub-regions.
 */
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 22);
	damon_add_region(r, t);
	damon_split_regions_of(t, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	r = damon_new_region(0, 220);
	damon_add_region(r, t);
	damon_split_regions_of(t, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
	damon_destroy_ctx(c);
}
245 | |
246 | static void damon_test_ops_registration(struct kunit *test) |
247 | { |
248 | struct damon_ctx *c = damon_new_ctx(); |
249 | struct damon_operations ops, bak; |
250 | |
251 | /* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */ |
252 | KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0); |
253 | KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0); |
254 | |
255 | /* Double-registration is prohibited */ |
256 | ops.id = DAMON_OPS_VADDR; |
257 | KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); |
258 | ops.id = DAMON_OPS_PADDR; |
259 | KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); |
260 | |
261 | /* Unknown ops id cannot be registered */ |
262 | KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL); |
263 | |
264 | /* Registration should success after unregistration */ |
265 | mutex_lock(&damon_ops_lock); |
266 | bak = damon_registered_ops[DAMON_OPS_VADDR]; |
267 | damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){}; |
268 | mutex_unlock(lock: &damon_ops_lock); |
269 | |
270 | ops.id = DAMON_OPS_VADDR; |
271 | KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0); |
272 | |
273 | mutex_lock(&damon_ops_lock); |
274 | damon_registered_ops[DAMON_OPS_VADDR] = bak; |
275 | mutex_unlock(lock: &damon_ops_lock); |
276 | |
277 | /* Check double-registration failure again */ |
278 | KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); |
279 | |
280 | damon_destroy_ctx(ctx: c); |
281 | } |
282 | |
283 | static void damon_test_set_regions(struct kunit *test) |
284 | { |
285 | struct damon_target *t = damon_new_target(); |
286 | struct damon_region *r1 = damon_new_region(start: 4, end: 16); |
287 | struct damon_region *r2 = damon_new_region(start: 24, end: 32); |
288 | struct damon_addr_range range = {.start = 8, .end = 28}; |
289 | unsigned long expects[] = {8, 16, 16, 24, 24, 28}; |
290 | int expect_idx = 0; |
291 | struct damon_region *r; |
292 | |
293 | damon_add_region(r: r1, t); |
294 | damon_add_region(r: r2, t); |
295 | damon_set_regions(t, ranges: &range, nr_ranges: 1); |
296 | |
297 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3); |
298 | damon_for_each_region(r, t) { |
299 | KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]); |
300 | KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]); |
301 | } |
302 | damon_destroy_target(t); |
303 | } |
304 | |
305 | static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test) |
306 | { |
307 | struct damon_attrs attrs = { |
308 | .sample_interval = 10, |
309 | .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10 |
310 | }; |
311 | |
312 | KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0); |
313 | } |
314 | |
315 | static void damon_test_update_monitoring_result(struct kunit *test) |
316 | { |
317 | struct damon_attrs old_attrs = { |
318 | .sample_interval = 10, .aggr_interval = 1000,}; |
319 | struct damon_attrs new_attrs; |
320 | struct damon_region *r = damon_new_region(start: 3, end: 7); |
321 | |
322 | r->nr_accesses = 15; |
323 | r->nr_accesses_bp = 150000; |
324 | r->age = 20; |
325 | |
326 | new_attrs = (struct damon_attrs){ |
327 | .sample_interval = 100, .aggr_interval = 10000,}; |
328 | damon_update_monitoring_result(r, old_attrs: &old_attrs, new_attrs: &new_attrs); |
329 | KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); |
330 | KUNIT_EXPECT_EQ(test, r->age, 2); |
331 | |
332 | new_attrs = (struct damon_attrs){ |
333 | .sample_interval = 1, .aggr_interval = 1000}; |
334 | damon_update_monitoring_result(r, old_attrs: &old_attrs, new_attrs: &new_attrs); |
335 | KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); |
336 | KUNIT_EXPECT_EQ(test, r->age, 2); |
337 | |
338 | new_attrs = (struct damon_attrs){ |
339 | .sample_interval = 1, .aggr_interval = 100}; |
340 | damon_update_monitoring_result(r, old_attrs: &old_attrs, new_attrs: &new_attrs); |
341 | KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); |
342 | KUNIT_EXPECT_EQ(test, r->age, 20); |
343 | |
344 | damon_free_region(r); |
345 | } |
346 | |
347 | static void damon_test_set_attrs(struct kunit *test) |
348 | { |
349 | struct damon_ctx *c = damon_new_ctx(); |
350 | struct damon_attrs valid_attrs = { |
351 | .min_nr_regions = 10, .max_nr_regions = 1000, |
352 | .sample_interval = 5000, .aggr_interval = 100000,}; |
353 | struct damon_attrs invalid_attrs; |
354 | |
355 | KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0); |
356 | |
357 | invalid_attrs = valid_attrs; |
358 | invalid_attrs.min_nr_regions = 1; |
359 | KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); |
360 | |
361 | invalid_attrs = valid_attrs; |
362 | invalid_attrs.max_nr_regions = 9; |
363 | KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); |
364 | |
365 | invalid_attrs = valid_attrs; |
366 | invalid_attrs.aggr_interval = 4999; |
367 | KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); |
368 | |
369 | damon_destroy_ctx(ctx: c); |
370 | } |
371 | |
/*
 * Test damon_moving_sum() against a pre-computed sequence of expected
 * partial sums for a window of ten values.
 */
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
			45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}
386 | |
387 | static void damos_test_new_filter(struct kunit *test) |
388 | { |
389 | struct damos_filter *filter; |
390 | |
391 | filter = damos_new_filter(type: DAMOS_FILTER_TYPE_ANON, matching: true); |
392 | KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); |
393 | KUNIT_EXPECT_EQ(test, filter->matching, true); |
394 | KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); |
395 | KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list); |
396 | damos_destroy_filter(f: filter); |
397 | } |
398 | |
399 | static void damos_test_filter_out(struct kunit *test) |
400 | { |
401 | struct damon_target *t; |
402 | struct damon_region *r, *r2; |
403 | struct damos_filter *f; |
404 | |
405 | f = damos_new_filter(type: DAMOS_FILTER_TYPE_ADDR, matching: true); |
406 | f->addr_range = (struct damon_addr_range){ |
407 | .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6}; |
408 | |
409 | t = damon_new_target(); |
410 | r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5); |
411 | damon_add_region(r, t); |
412 | |
413 | /* region in the range */ |
414 | KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); |
415 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); |
416 | |
417 | /* region before the range */ |
418 | r->ar.start = DAMON_MIN_REGION * 1; |
419 | r->ar.end = DAMON_MIN_REGION * 2; |
420 | KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); |
421 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); |
422 | |
423 | /* region after the range */ |
424 | r->ar.start = DAMON_MIN_REGION * 6; |
425 | r->ar.end = DAMON_MIN_REGION * 8; |
426 | KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); |
427 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); |
428 | |
429 | /* region started before the range */ |
430 | r->ar.start = DAMON_MIN_REGION * 1; |
431 | r->ar.end = DAMON_MIN_REGION * 4; |
432 | KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); |
433 | /* filter should have split the region */ |
434 | KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1); |
435 | KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2); |
436 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); |
437 | r2 = damon_next_region(r); |
438 | KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2); |
439 | KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4); |
440 | damon_destroy_region(r: r2, t); |
441 | |
442 | /* region started in the range */ |
443 | r->ar.start = DAMON_MIN_REGION * 2; |
444 | r->ar.end = DAMON_MIN_REGION * 8; |
445 | KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); |
446 | /* filter should have split the region */ |
447 | KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2); |
448 | KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6); |
449 | KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); |
450 | r2 = damon_next_region(r); |
451 | KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6); |
452 | KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8); |
453 | damon_destroy_region(r: r2, t); |
454 | |
455 | damon_free_target(t); |
456 | damos_free_filter(f); |
457 | } |
458 | |
static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long prev_input = 900000;
	unsigned long score = 200;

	/*
	 * The goal is always 10,000 (read the comment on
	 * damon_feed_loop_next_input()'s comment).  While the current score
	 * is below the goal, the next input should grow beyond the last one.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(prev_input, score),
			prev_input);

	/*
	 * While the current score exceeds the goal, the next input should
	 * shrink below the last one.
	 */
	score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(prev_input, score),
			prev_input);

	/*
	 * The farther the current score is from the goal, the larger the
	 * next input should be.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(prev_input, 200),
			damon_feed_loop_next_input(prev_input, 2000));
}
489 | |
/* All DAMON core test cases, run by KUnit in the listed order. */
static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	{},	/* terminator */
};
509 | |
510 | static struct kunit_suite damon_test_suite = { |
511 | .name = "damon" , |
512 | .test_cases = damon_test_cases, |
513 | }; |
514 | kunit_test_suite(damon_test_suite); |
515 | |
516 | #endif /* _DAMON_CORE_TEST_H */ |
517 | |
518 | #endif /* CONFIG_DAMON_KUNIT_TEST */ |
519 | |