/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4 KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* on 2 MB or 1 GB pages */
#define _PAGE_BIT_SOFTW4	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
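
/*
 * Example: a PROT_NONE pte has _PAGE_PROTNONE set and _PAGE_PRESENT
 * clear; pte_present() (in <asm/pgtable.h>) tests both bits, so such
 * a pte still reads as present even though any hardware access faults.
 */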

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
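
/*
 * Example: protection key 5 (binary 0101) is encoded by setting
 * _PAGE_PKEY_BIT0 and _PAGE_PKEY_BIT2, i.e. pte bits 59 and 61.
 */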

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK	(_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK	0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft dirty bit when a page goes to swap is tricky:
 * we need a bit which can be stored in the pte _and_ does not
 * conflict with the swap entry format. On x86 bits 1-4 are *not*
 * involved in the swap entry computation, but bit 7 is used for THP
 * migration, so we borrow bit 1 (_PAGE_RW) for soft dirty tracking.
 *
 * Please note that this bit must be treated as the swap soft dirty
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | \
				 _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Set of bits not changed in pte_modify. The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,
	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | \
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | \
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define __PAGE_KERNEL_WP		(__PAGE_KERNEL | _PAGE_CACHE_WP)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)

#ifndef __ASSEMBLY__

#define _PAGE_ENC	(_AT(pteval_t, sme_me_mask))

#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
			 _PAGE_DIRTY | _PAGE_ENC)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define __PAGE_KERNEL_ENC	(__PAGE_KERNEL | _PAGE_ENC)
#define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _PAGE_ENC)

#define __PAGE_KERNEL_NOENC	(__PAGE_KERNEL)
#define __PAGE_KERNEL_NOENC_WP	(__PAGE_KERNEL_WP)

#define default_pgprot(x)	__pgprot((x) & __default_kernel_pte_mask)
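
/*
 * Note (illustrative): __default_kernel_pte_mask is set up at boot so
 * that bits the system cannot or should not use get filtered out here,
 * e.g. _PAGE_NX on CPUs without NX support, or _PAGE_GLOBAL when page
 * table isolation is enabled. The PAGE_KERNEL* values below therefore
 * degrade gracefully instead of setting unsupported bits.
 */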

#define PAGE_KERNEL		default_pgprot(__PAGE_KERNEL | _PAGE_ENC)
#define PAGE_KERNEL_NOENC	default_pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
#define PAGE_KERNEL_EXEC	default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_EXEC_NOENC	default_pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX		default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
#define PAGE_KERNEL_NOCACHE	default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE	default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE_EXEC	default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_VVAR	default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)

#define PAGE_KERNEL_IO		default_pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE	default_pgprot(__PAGE_KERNEL_IO_NOCACHE)

#endif	/* __ASSEMBLY__ */

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
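
/*
 * Example: a PROT_READ|PROT_WRITE private mapping indexes __P011, i.e.
 * PAGE_COPY, which lacks _PAGE_RW: the first write faults and triggers
 * copy-on-write. The shared variant __S011 is PAGE_SHARED, which keeps
 * _PAGE_RW set.
 */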

/*
 * Early identity mapping PTE attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4 KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t.
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we
 * can't use it here.
 */

#define PGD_PAE_PAGE_MASK	((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK	(((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)
/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are reserved and must be zero (MBZ).
 */
#define PGD_ALLOWED_BITS	(PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
				 _PAGE_PWT | _PAGE_PCD | \
				 _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS	(~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
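
/*
 * Minimal sketch (hypothetical helper, for illustration only; the real
 * pgd_present() lives in <asm/pgtable.h>): a pgd is considered present
 * when _PAGE_PRESENT is set in its flags.
 */
static inline bool __example_pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}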

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
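
/*
 * Illustrative sketch (hypothetical helper; mirrors what pmd_pfn() in
 * <asm/pgtable.h> does): the PSE-aware mask above makes PFN extraction
 * work for both page-table pmds and 2 MB huge pmds.
 */
static inline unsigned long __example_pmd_pfn(pmd_t pmd)
{
	return (native_pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}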

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
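
/*
 * Minimal sketch of how _PAGE_CHG_MASK is meant to be used (the real
 * pte_modify() lives in <asm/pgtable.h> and is more careful; this
 * illustrative copy uses only the native helpers above): bits inside
 * the mask survive, bits outside it - including the protection key -
 * are taken from 'newprot'.
 */
static inline pte_t __example_pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = native_pte_val(pte) & _PAGE_CHG_MASK;

	val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return native_make_pte(val);
}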

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
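
/*
 * Example (pure bit arithmetic from the two macros above):
 * __pte2cm_idx(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) == 7 and
 * __cm_idx2pte(7) == (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT).
 */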

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}

static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
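
/*
 * Illustrative helper (hypothetical, not part of this header's API):
 * replace the cache-mode bits of an existing pgprot by clearing
 * _PAGE_CACHE_MASK (PAT/PCD/PWT) and inserting the translated value
 * for 'pcm'.
 */
static inline pgprot_t __example_set_cachemode(pgprot_t prot,
					       enum page_cache_mode pcm)
{
	pgprotval_t val = pgprot_val(prot) & ~_PAGE_CACHE_MASK;

	return __pgprot(val | cachemode2protval(pcm));
}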

static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}

static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
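
/*
 * Example: on a 4 KB pte the PAT selector lives in bit 7, but on
 * 2 MB/1 GB entries bit 7 is PSE, so the selector moves to bit 12
 * (_PAGE_BIT_PAT_LARGE). The two helpers above shuttle it between
 * those positions.
 */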

typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init	paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry is
 * present. NOTE: the return type is pte_t, but if the pmd maps a huge
 * page (PSE), the pmd entry itself is returned as a pte.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
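
/*
 * Usage sketch (illustrative; 'kaddr' is a hypothetical variable):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(kaddr, &level);
 *
 * A non-NULL 'pte' with level == PG_LEVEL_2M means 'kaddr' is mapped
 * by a PSE pmd, returned here as a pte.
 */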

extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned int numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */