// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range *range;
	unsigned long last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

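/*
 * hmm_pfns_fill() - fill a span of the output pfn array with a single value
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @range: the hmm_range whose hmm_pfns[] array is filled
 * @cpu_flags: value stored for every page in [addr, end), e.g. 0 or
 *	HMM_PFN_ERROR
 */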
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

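/*
 * hmm_pte_need_fault() - decide whether a single page must be faulted
 * @hmm_vma_walk: walk state, used to reach the hmm_range
 * @pfn_req_flags: the HMM_PFN_REQ_* flags the caller stored for this page
 * @cpu_flags: HMM_PFN_* flags derived from the current CPU page table entry,
 *	or 0 when there is none
 *
 * Return: 0 when the existing entry satisfies the request, otherwise a mask
 * of HMM_NEED_FAULT and HMM_NEED_WRITE_FAULT.
 */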
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * ways. In the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults. In the
	 * second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault ? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

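/*
 * hmm_range_need_fault() - check a run of pages that share the same cpu_flags
 *
 * OR together the HMM_NEED_* bits returned by hmm_pte_need_fault() for
 * @npages request entries starting at @hmm_pfns, stopping early once both
 * bits are set.
 */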
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

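/*
 * hmm_vma_walk_hole() - handle a range with no page table entries
 *
 * If the caller asked for these pages to be faulted, fault them in when a
 * vma exists and fail with -EFAULT when it does not. Otherwise report the
 * pages as 0 (not present), or as HMM_PFN_ERROR when there is no vma.
 */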
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

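/* Encode the page order in the bits above HMM_PFN_ORDER_SHIFT of an hmm pfn. */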
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

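/* Convert a pmd entry into HMM_PFN_* flags, including the pmd's page order. */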
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
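/*
 * hmm_vma_handle_pmd() - report a huge (transparent or devmap) pmd
 *
 * Either fault the range when the pmd cannot satisfy the request, or fill
 * the output array with one pfn per PAGE_SIZE page that the pmd covers.
 */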
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

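/* Convert a pte entry into HMM_PFN_* flags (0 for none/non-present/protnone). */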
static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

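/*
 * hmm_vma_handle_pte() - compute the output pfn value for a single pte
 *
 * Return 0 with the result stored in *hmm_pfn, or unmap the pte table and
 * return a negative code: -EBUSY after faulting or while waiting on a
 * migration entry, or -EFAULT (or another fault error) when the page cannot
 * be made valid.
 */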
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = ptep_get(ptep);
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap pte such as a DAX page when all requested pfn flags
	 * (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

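/*
 * hmm_vma_walk_pmd() - pmd_entry callback of the HMM page table walk
 *
 * Dispatch on the pmd state: holes and non-present entries are either
 * faulted/waited on or reported as empty/error, huge pmds are handled by
 * hmm_vma_handle_pmd(), and otherwise every pte in the pte table is handled
 * by hmm_vma_handle_pte().
 */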
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, ie either none, migration,
	 * huge or transparent huge. At this point either it is a valid pmd
	 * entry pointing to a pte directory or it is a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	if (!ptep)
		goto again;
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
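/* Convert a pud entry into HMM_PFN_* flags, including the pud's page order. */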
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

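/*
 * hmm_vma_walk_pud() - pud_entry callback of the HMM page table walk
 *
 * Report huge devmap puds directly; for anything else ask the page walker to
 * descend into (and, if needed, split) the pud by setting ACTION_SUBTREE.
 */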
static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
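/*
 * hmm_vma_walk_hugetlb_entry() - hugetlb_entry callback of the HMM page walk
 *
 * Report the whole huge page, faulting it in first if the existing huge pte
 * cannot satisfy the request.
 */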
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

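/*
 * hmm_vma_walk_test() - test_walk callback used to filter out vmas that
 * hmm_range_fault() cannot handle.
 */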
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry = hmm_vma_walk_pud,
	.pmd_entry = hmm_vma_walk_pmd,
	.pte_hole = hmm_vma_walk_hole,
	.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
	.test_walk = hmm_vma_walk_test,
	.walk_lock = PGWALK_RDLOCK,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid
 *		ie it has no backing VMA or it is illegal to access
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (ie causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
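
/*
 * A minimal usage sketch (see Documentation/mm/hmm.rst for the full pattern),
 * assuming the caller has already registered a struct mmu_interval_notifier
 * "notifier" covering [start, end) of "mm", owns storage for the pfns array,
 * and holds a driver-private lock around its device page table update
 * (driver_lock()/driver_unlock() below are illustrative, not kernel APIs):
 *
 *	struct hmm_range range = {
 *		.notifier = &notifier,
 *		.start = start,
 *		.end = end,
 *		.hmm_pfns = pfns,
 *		.default_flags = HMM_PFN_REQ_FAULT,
 *	};
 *	int ret;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&notifier);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	driver_lock();
 *	if (mmu_interval_read_retry(&notifier, range.notifier_seq)) {
 *		driver_unlock();
 *		goto again;
 *	}
 *	... program the device page tables from pfns[] ...
 *	driver_unlock();
 */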