// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. Called (though
 * only very seldom) from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
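
/*
 * For reference, a minimal sketch of how these helpers are reached:
 * the p?d_none_or_clear_bad() inlines in asm-generic/pgtable.h look
 * roughly like this (pmd shown; pgd/p4d/pud are analogous):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
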
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
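
/*
 * Hedged usage sketch: a fault handler calling the helper above is
 * expected to act on the return value, along the lines of typical
 * handle_pte_fault()-style code (shown here only for illustration):
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */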

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
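
/*
 * Hedged usage sketch (modelled on rmap unmap paths such as
 * try_to_unmap_one(); details vary by kernel version): the caller
 * holds the page table lock, and takes the old pte and flushes the
 * TLB in one step:
 *
 *	pteval = ptep_clear_flush(vma, address, ptep);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 */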

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* The pmd may be a (non-present) migration entry under splitting. */
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
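
/*
 * Hedged sketch of the deposit/withdraw pairing (modelled on THP
 * callers such as __do_huge_pmd_anonymous_page() and zap_huge_pmd();
 * exact call sites vary by kernel version). A preallocated pte page
 * table is stashed when the huge pmd is installed:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, entry);
 *	spin_unlock(ptl);
 *
 * and handed back when the huge pmd is zapped or split:
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	pte_free(mm, pgtable);
 */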

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
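
/*
 * Hedged usage sketch (modelled on change_huge_pmd(); details vary by
 * kernel version): invalidating first means concurrent hardware walks
 * never see a half-modified huge pmd:
 *
 *	entry = pmdp_invalidate(vma, addr, pmd);
 *	entry = pmd_modify(entry, newprot);
 *	set_pmd_at(mm, addr, pmd, entry);
 */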

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
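
/*
 * Hedged usage sketch (modelled on collapse_huge_page() in khugepaged;
 * the exact sequence varies by kernel version): the old pte page is
 * unhooked and its TLB entries shot down before pages are copied, and
 * it can be relinked with pmd_populate() if the collapse fails:
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 *	...
 *	pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 */
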
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */