// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
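/*
 * Note: the override above drops the minimum region size to one byte,
 * seemingly so that the KUnit tests can construct and split arbitrarily
 * small synthetic regions.
 */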

/*
 * 't->pid' should be a pointer to the relevant 'struct pid' holding a
 * reference count.  Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
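 *
 * For example, with a 4 KiB DAMON_MIN_REGION, splitting a 1 MiB region into
 * three pieces produces DAMON_MIN_REGION-aligned pieces of 340 KiB each, and
 * the last piece is then extended to the original end to cover the remaining
 * 4 KiB of rounding error.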
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = damon_sz_region(r);
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * mm		the address space to work on
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below for why this is
 * necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct mm_struct *mm,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range first_gap = {0}, second_gap = {0};
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	unsigned long start;

	/*
	 * Find the two biggest gaps so that first_gap > second_gap > others.
	 * If this is too slow, it can be optimised to examine the maple
	 * tree gaps.
	 */
	for_each_vma(vmi, vma) {
		unsigned long gap;

		if (!prev) {
			start = vma->vm_start;
			goto next;
		}
		gap = vma->vm_start - prev->vm_end;

		if (gap > sz_range(&first_gap)) {
			second_gap = first_gap;
			first_gap.start = prev->vm_end;
			first_gap.end = vma->vm_start;
		} else if (gap > sz_range(&second_gap)) {
			second_gap.start = prev->vm_end;
			second_gap.end = vma->vm_start;
		}
next:
		prev = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small part of the entire address space is actually mapped
 * to memory and accessed, monitoring the unmapped regions is wasteful.  On
 * the other hand, because DAMON can deal with some level of noise, tracking
 * every mapping is not strictly required, and doing so could even incur high
 * overhead if the mappings change frequently or their number is large.  The
 * adaptive regions adjustment mechanism further helps to deal with the noise
 * by simply identifying the unmapped areas as regions that have no access.
 * Moreover, applying the real mappings, which would contain many unmapped
 * areas, would make the adaptive mechanism quite complex.  Nevertheless,
 * excessively large unmapped areas inside the monitoring targets should be
 * removed so that the adaptive mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space.  In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of a process is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in usual address spaces,
 * excluding these two biggest unmapped regions is sufficient as a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

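	/*
	 * Compute the desired sub-region size: the total span of the three
	 * regions divided by 'min_nr_regions', but no smaller than
	 * DAMON_MIN_REGION, so that the target starts with roughly
	 * 'min_nr_regions' regions in total.
	 */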
	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
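		/*
		 * Fit the existing regions into the possibly changed three
		 * big mapped ranges, keeping already collected monitoring
		 * results where possible.
		 */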
		damon_set_regions(t, three_regions, 3);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pmd_t pmde;
	spinlock_t *ptl;

	if (pmd_trans_huge(pmdp_get(pmd))) {
		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_trans_huge(pmde)) {
			damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

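	/* Not a transparent huge page (or no longer one); check the PTE level */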
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	if (!pte_present(ptep_get(pte)))
		goto out;
	damon_ptep_mkold(pte, walk->vma, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct folio *folio = pfn_folio(pte_pfn(entry));
	unsigned long psize = huge_page_size(hstate_vma(vma));

	folio_get(folio);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry, psize);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
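	/*
	 * Walk only the single page that contains 'addr': the range
	 * [addr, addr + 1) covers just the sampling address.
	 */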
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct mm_struct *mm,
					struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	/* size of the folio for the access checked virtual memory address */
	unsigned long *folio_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	pte_t ptent;
	spinlock_t *ptl;
	struct folio *folio;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmdp_get(pmd))) {
		pmd_t pmde;

		ptl = pmd_lock(walk->mm, pmd);
		pmde = pmdp_get(pmd);

		if (!pmd_present(pmde)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_trans_huge(pmde)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		folio = damon_get_folio(pmd_pfn(pmde));
		if (!folio)
			goto huge_out;
		if (pmd_young(pmde) || !folio_test_idle(folio) ||
		    mmu_notifier_test_young(walk->mm, addr))
			priv->young = true;
		*priv->folio_sz = HPAGE_PMD_SIZE;
		folio_put(folio);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	ptent = ptep_get(pte);
	if (!pte_present(ptent))
		goto out;
	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		goto out;
	if (pte_young(ptent) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = folio_size(folio);
	folio_put(folio);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	folio = pfn_folio(pte_pfn(entry));
	folio_get(folio);

	if (pte_young(entry) || !folio_test_idle(folio) ||
	    mmu_notifier_test_young(walk->mm, addr))
		priv->young = true;
	*priv->folio_sz = huge_page_size(h);

	folio_put(folio);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *folio_sz)
{
	struct damon_young_walk_private arg = {
		.folio_sz = folio_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct mm_struct *mm,
				struct damon_region *r, bool same_target,
				struct damon_attrs *attrs)
{
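	/*
	 * Cache of the last check.  If the sampling address of the next
	 * region of the same target falls in the same folio, the cached
	 * result is reused instead of walking the page table again.
	 */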
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	if (!mm) {
		damon_update_region_access_rate(r, false, attrs);
		return;
	}

	/* If the region is in the last checked page, reuse the result */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;
	bool same_target;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		same_target = false;
		damon_for_each_region(r, t) {
			__damon_va_check_access(mm, r, same_target,
					&ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
			same_target = true;
		}
		if (mm)
			mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(struct damon_target *t)
{
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
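/*
 * Without madvise() syscall support, the madvise()-based DAMOS actions
 * cannot be applied; report that zero bytes have been applied.
 */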
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(damon_sz_region(r));
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

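	/* do_madvise() returns 0 on success; report 'len' bytes applied then */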
	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		/*
		 * DAMOS actions that are not yet supported by 'vaddr'.
		 */
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
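	/*
	 * damon_cold_score() gives higher priority to colder regions, which
	 * fits the page-out action; other actions get the default max score.
	 */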
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);

#include "vaddr-test.h"