/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)						\
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",		\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)						\
	pr_err("%s:%d: bad pmd %p(%016Lx)\n",			\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)						\
	pr_err("%s:%d: bad pgd %p(%016Lx)\n",			\
	       __FILE__, __LINE__, &(e), pgd_val(e))

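/*
 * pxx_xchg64() atomically replaces a 64-bit page-table entry and returns
 * the previous value, typed via token pasting.  As a sketch of what the
 * macro does, pxx_xchg64(pmd, pmdp, 0ULL) expands along the lines of:
 *
 *	pmdval_t *_p = (pmdval_t *)pmdp;
 *	pmdval_t _o = *_p;
 *	do { } while (!try_cmpxchg64(_p, &_o, 0ULL));
 *	native_make_pmd(_o);
 *
 * try_cmpxchg64() rewrites _o with the current entry value on failure,
 * so the loop retries until the full 64-bit exchange succeeds.
 */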
#define pxx_xchg64(_pxx, _ptr, _val) ({					\
	_pxx##val_t *_p = (_pxx##val_t *)_ptr;				\
	_pxx##val_t _o = *_p;						\
	do { } while (!try_cmpxchg64(_p, &_o, (_val)));			\
	native_make_##_pxx(_o);						\
})

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
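/*
 * Note the ordering below: the high half is written first because the
 * present bit lives in pte_low, so a concurrent walker can never observe
 * an entry that looks present but is only half updated.
 */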
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(ptep->pte_high, pte.pte_high);
	smp_wmb();
	WRITE_ONCE(ptep->pte_low, pte.pte_low);
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	pxx_xchg64(pte, ptep, native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pxx_xchg64(pmd, pmdp, native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	pxx_xchg64(pud, pudp, native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	WRITE_ONCE(ptep->pte_low, 0);
	smp_wmb();
	WRITE_ONCE(ptep->pte_high, 0);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	WRITE_ONCE(pmdp->pmd_low, 0);
	smp_wmb();
	WRITE_ONCE(pmdp->pmd_high, 0);
}

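/* No-op: pud_clear() below does the real clearing via set_pud(). */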
static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called either have
	 * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
	 * pud_clear_bad()), so we don't need TLB flush here.
	 */
}

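/*
 * On SMP, get-and-clear must be a single atomic 64-bit exchange: if the
 * entry were cleared half-by-half, another CPU could set the Accessed or
 * Dirty bits in between and that update would be lost.  UP kernels can
 * use the cheaper non-atomic local variants.
 */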
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	return pxx_xchg64(pte, ptep, 0ULL);
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	return pxx_xchg64(pmd, pmdp, 0ULL);
}

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	return pxx_xchg64(pud, pudp, 0ULL);
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If pmd has present bit cleared we can get away without expensive
	 * cmpxchg64: we can update pmdp half-by-half without racing with
	 * anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		/* xchg acts as a barrier before setting of the high bits */
		old.pmd_low = xchg(&pmdp->pmd_low, pmd.pmd_low);
		old.pmd_high = READ_ONCE(pmdp->pmd_high);
		WRITE_ONCE(pmdp->pmd_high, pmd.pmd_high);

		return old;
	}

	return pxx_xchg64(pmd, pmdp, pmd.pmd);
}
#endif

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   < type -> <---------------------- offset ----------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------------------------------------> 0 E 0 0 0 0 0 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 */
#define SWP_TYPE_BITS		5
#define _SWP_TYPE_MASK		((1U << SWP_TYPE_BITS) - 1)

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK()	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x)		(((x).val) & _SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> SWP_TYPE_BITS)
#define __swp_entry(type, offset) ((swp_entry_t){((type) & _SWP_TYPE_MASK) \
					| (offset) << SWP_TYPE_BITS})
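/*
 * A worked example of the 32-bit intermediate encoding: for type 1 and
 * offset 0x100, __swp_entry(1, 0x100) yields
 * val = (1 & 0x1f) | (0x100 << 5) = 0x2001.
 */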

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
 * __swp_entry_to_pte() through the following helper macro based on 64bit
 * __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
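/*
 * Note that the offset is stored bit-inverted (the ~ above): together
 * with placing the type in the top bits, this keeps the PFN bits of a
 * swap PTE pointing at non-existent memory, as part of the L1TF
 * mitigation (see asm/pgtable-invert.h, included below).
 */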
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64bit to the 32bit
 * intermediate representation, using the following macros based on 64bit
 * __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))

/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_PSE
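/*
 * Bit 7 is _PAGE_PSE, which hardware only interprets in present (huge)
 * entries; swap PTEs are never present, so the bit is free to carry the
 * software-only exclusive marker.
 */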

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */