/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)                                            \
        pr_err("%s:%d: bad pte %p(%016lx)\n",                   \
               __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)                                            \
        pr_err("%s:%d: bad pmd %p(%016lx)\n",                   \
               __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)                                            \
        pr_err("%s:%d: bad pud %p(%016lx)\n",                   \
               __FILE__, __LINE__, &(e), pud_val(e))

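/*
 * p4d_ERROR is only meaningful when a real p4d level can exist, i.e. when
 * 5-level paging support is configured; with 4-level paging the p4d level
 * is folded into the pgd and no separate report is needed.
 */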
#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)                                            \
        pr_err("%s:%d: bad p4d %p(%016lx)\n",                   \
               __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)                                            \
        pr_err("%s:%d: bad pgd %p(%016lx)\n",                   \
               __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
        return !pgtable_l5_enabled();
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        WRITE_ONCE(*ptep, pte);
}

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        native_set_pte(ptep, native_make_pte(0));
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
        native_set_pmd(pmd, native_make_pmd(0));
}

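/*
 * Note on the get-and-clear helpers below: on SMP the entry is cleared with
 * an atomic xchg() so the old value is read and the slot zeroed in a single
 * step; a plain read followed by a clear could lose Accessed/Dirty bits set
 * concurrently by another CPU's hardware page-table walker.  The UP variants
 * simply read and then clear.
 */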
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
        return native_make_pte(xchg(&xp->pte, 0));
#else
        /* native_local_ptep_get_and_clear,
           but duplicated because of cyclic dependency */
        pte_t ret = *xp;
        native_pte_clear(NULL, 0, xp);
        return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
        return native_make_pmd(xchg(&xp->pmd, 0));
#else
        /* native_local_pmdp_get_and_clear,
           but duplicated because of cyclic dependency */
        pmd_t ret = *xp;
        native_pmd_clear(xp);
        return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
        WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
        native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
        return native_make_pud(xchg(&xp->pud, 0));
#else
        /*
         * native_local_pudp_get_and_clear,
         * but duplicated because of cyclic dependency
         */
        pud_t ret = *xp;

        native_pud_clear(xp);
        return ret;
#endif
}

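/*
 * With 4-level paging the "p4d" slot written below is really the top-level
 * pgd entry, so when page-table isolation (PTI) is enabled the write must
 * also be mirrored into the user-space copy of the page table via
 * pti_set_user_pgtbl().  With 5-level paging the pgd accessors handle that
 * and the p4d can be written directly.
 */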
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        pgd_t pgd;

        if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
                WRITE_ONCE(*p4dp, p4d);
                return;
        }

        pgd = native_make_pgd(native_p4d_val(p4d));
        pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
        WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}

static inline void native_p4d_clear(p4d_t *p4d)
{
        native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}

static inline void native_pgd_clear(pgd_t *pgd)
{
        native_set_pgd(pgd, native_make_pgd(0));
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* PGD - Level 4 access */

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| E|F|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * Bits 1-4 are not used in the non-present format and are available for
 * the special uses described below:
 *
 * SD (1) in swp entry is used to store the soft dirty bit, which helps us
 * remember soft dirty over page migration.
 *
 * F (2) in swp entry is used to record when a pagetable is
 * writeprotected by userfaultfd WP support.
 *
 * E (3) in swp entry is used to remember PG_anon_exclusive.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS           5

#define SWP_OFFSET_FIRST_BIT    (_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x)           ((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x)         (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
        (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
        | ((unsigned long)(type) << (64 - SWP_TYPE_BITS)) })

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)         ((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)           (__pte((x).val))
#define __swp_entry_to_pmd(x)           (__pmd((x).val))
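
/*
 * Worked example (assuming _PAGE_BIT_PROTNONE is bit 8, the aliased Global
 * bit, as on x86): SWP_OFFSET_FIRST_BIT is 9 and SWP_OFFSET_SHIFT is 14, so
 * __swp_entry(type, offset) places the 5-bit type in bits 63:59 and the
 * bitwise-inverted offset in bits 58:9, leaving the low bits (P, SD, F, E
 * and bit 7) clear.  __swp_type() just shifts the value right by 59.
 * __swp_offset() first applies ~ to undo the inversion, then shifts left by
 * SWP_TYPE_BITS to drop the type field and right by SWP_OFFSET_SHIFT to
 * realign the offset at bit 0.
 */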

extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
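/*
 * These translate a canonical kernel virtual address to a /proc/kcore file
 * offset and back: kc_vaddr_to_offset() masks off the sign-extended upper
 * bits, and kc_offset_to_vaddr() sets them again to rebuild a canonical
 * kernel address.
 */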

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

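/*
 * Fast GUP walks page tables without taking mmap_lock, so it must only be
 * attempted on user addresses: any range whose end has bits at or above
 * __VIRTUAL_MASK_SHIFT set (a kernel or non-canonical address) is rejected.
 */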
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
        if (end >> __VIRTUAL_MASK_SHIFT)
                return false;
        return true;
}

#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */

Source: linux/arch/x86/include/asm/pgtable_64.h