1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/module.h> |
3 | #include <linux/kernel.h> |
4 | #include <linux/slab.h> |
5 | #include <linux/mm_types.h> |
6 | #include <linux/pgtable.h> |
7 | |
8 | #include <asm/cputype.h> |
9 | #include <asm/idmap.h> |
10 | #include <asm/hwcap.h> |
11 | #include <asm/pgalloc.h> |
12 | #include <asm/sections.h> |
13 | #include <asm/system_info.h> |
14 | |
15 | /* |
16 | * Note: accesses outside of the kernel image and the identity map area |
17 | * are not supported on any CPU using the idmap tables as its current |
18 | * page tables. |
19 | */ |
/* Root of the static 1:1 page tables built by init_static_idmap(). */
pgd_t *idmap_pgd __ro_after_init;
/*
 * Offset between a virtual address and its idmap address; presumably
 * applied by virt_to_idmap() and assigned elsewhere (not written in this
 * file) on platforms where the idmap is relocated — confirm at the setter.
 */
long long arch_phys_to_idmap_offset __ro_after_init;
22 | |
23 | #ifdef CONFIG_ARM_LPAE |
/*
 * Fill the PMD entries covering [addr, end) with 1:1 section mappings
 * carrying @prot (LPAE variant).
 *
 * If the PUD slot is empty/bad, or still points into the swapper's tables
 * (L_PGD_SWAPPER set), a fresh PMD page is allocated so the shared tables
 * are not modified.  On allocation failure the range is silently left
 * unmapped apart from a warning.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warn("Failed to allocate identity pmd.\n" );
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
				PTRS_PER_PMD * sizeof(pmd_t));
		/* Install the new PMD page, then point at the first entry. */
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		/* Section mapping: physical == virtual within the idmap. */
		*pmd = __pmd((addr & PMD_MASK) | prot);
		/* Write back the entry so the MMU walker sees it. */
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
54 | #else /* !CONFIG_ARM_LPAE */ |
55 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, |
56 | unsigned long prot) |
57 | { |
58 | pmd_t *pmd = pmd_offset(pud, address: addr); |
59 | |
60 | addr = (addr & PMD_MASK) | prot; |
61 | pmd[0] = __pmd(val: addr); |
62 | addr += SECTION_SIZE; |
63 | pmd[1] = __pmd(val: addr); |
64 | flush_pmd_entry(pmd); |
65 | } |
66 | #endif /* CONFIG_ARM_LPAE */ |
67 | |
68 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
69 | unsigned long prot) |
70 | { |
71 | p4d_t *p4d = p4d_offset(pgd, address: addr); |
72 | pud_t *pud = pud_offset(p4d, address: addr); |
73 | unsigned long next; |
74 | |
75 | do { |
76 | next = pud_addr_end(addr, end); |
77 | idmap_add_pmd(pud, addr, end: next, prot); |
78 | } while (pud++, addr = next, addr != end); |
79 | } |
80 | |
81 | static void identity_mapping_add(pgd_t *pgd, const char *text_start, |
82 | const char *text_end, unsigned long prot) |
83 | { |
84 | unsigned long addr, end; |
85 | unsigned long next; |
86 | |
87 | addr = virt_to_idmap(text_start); |
88 | end = virt_to_idmap(text_end); |
89 | pr_info("Setting up static identity map for 0x%lx - 0x%lx\n" , addr, end); |
90 | |
91 | prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; |
92 | |
93 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family()) |
94 | prot |= PMD_BIT4; |
95 | |
96 | pgd += pgd_index(addr); |
97 | do { |
98 | next = pgd_addr_end(addr, end); |
99 | idmap_add_pud(pgd, addr, end: next, prot); |
100 | } while (pgd++, addr = next, addr != end); |
101 | } |
102 | |
103 | extern char __idmap_text_start[], __idmap_text_end[]; |
104 | |
105 | static int __init init_static_idmap(void) |
106 | { |
107 | idmap_pgd = pgd_alloc(&init_mm); |
108 | if (!idmap_pgd) |
109 | return -ENOMEM; |
110 | |
111 | identity_mapping_add(pgd: idmap_pgd, text_start: __idmap_text_start, |
112 | text_end: __idmap_text_end, prot: 0); |
113 | |
114 | /* Flush L1 for the hardware to see this page table content */ |
115 | if (!(elf_hwcap & HWCAP_LPAE)) |
116 | flush_cache_louis(); |
117 | |
118 | return 0; |
119 | } |
120 | early_initcall(init_static_idmap); |
121 | |
122 | /* |
123 | * In order to soft-boot, we need to switch to a 1:1 mapping for the |
124 | * cpu_reset functions. This will then ensure that we have predictable |
125 | * results when turning off the mmu. |
126 | */ |
/*
 * Switch the current CPU onto the static identity mapping in preparation
 * for turning the MMU off (soft reboot / kexec path).
 *
 * After this returns, only the kernel image and the identity-mapped
 * region may be touched (see the note at the top of this file).
 */
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);
	/* Discard branch-predictor state tied to the old mapping. */
	local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore potentially in the TLB.
	 */
	local_flush_tlb_all();
#endif
}
142 | |