1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. |
3 | |
4 | #include <linux/bug.h> |
5 | #include <linux/module.h> |
6 | #include <linux/init.h> |
7 | #include <linux/signal.h> |
8 | #include <linux/sched.h> |
9 | #include <linux/kernel.h> |
10 | #include <linux/errno.h> |
11 | #include <linux/string.h> |
12 | #include <linux/types.h> |
13 | #include <linux/pagemap.h> |
14 | #include <linux/ptrace.h> |
15 | #include <linux/mman.h> |
16 | #include <linux/mm.h> |
17 | #include <linux/highmem.h> |
18 | #include <linux/memblock.h> |
19 | #include <linux/swap.h> |
20 | #include <linux/proc_fs.h> |
21 | #include <linux/pfn.h> |
22 | #include <linux/initrd.h> |
23 | |
24 | #include <asm/setup.h> |
25 | #include <asm/cachectl.h> |
26 | #include <asm/dma.h> |
27 | #include <asm/pgalloc.h> |
28 | #include <asm/mmu_context.h> |
29 | #include <asm/sections.h> |
30 | #include <asm/tlb.h> |
31 | #include <asm/cacheflush.h> |
32 | |
/* Number of kernel PTE slots: one full PTE table per kernel-space PGD entry. */
#define PTRS_KERN_TABLE \
	((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

/* The initial (swapper) page directory, live before any process exists. */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
/* Shared all-invalid PTE table that empty PGD entries point at. */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
/* Statically allocated PTE tables backing the kernel half of the PGD. */
pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;

EXPORT_SYMBOL(invalid_pte_table);
/* One page of zeros, mapped read-only wherever a zero page is needed. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
44 | |
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Sanity-check the initrd range handed over by the boot loader and
 * reserve it in memblock. On any failure the range is zeroed so the
 * rest of the kernel treats the initrd as absent.
 */
static void __init setup_initrd(void)
{
	unsigned long bytes;

	if (initrd_end <= initrd_start) {
		pr_err("initrd not found or empty");
		goto disable;
	}

	/* The whole image must lie within directly addressable memory. */
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	bytes = initrd_end - initrd_start;

	/* Refuse an initrd that collides with an already-reserved region. */
	if (memblock_is_region_reserved(__pa(initrd_start), bytes)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
		       __pa(initrd_start), bytes);
		goto disable;
	}

	memblock_reserve(__pa(initrd_start), bytes);

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), bytes);

	initrd_below_start_ok = 1;
	return;

disable:
	initrd_start = initrd_end = 0;
	/* Continues the unterminated pr_err lines above on one console line. */
	pr_err(" - disabling initrd\n");
}
#endif
83 | |
/*
 * Late boot memory setup: establish max_mapnr and high_memory, reserve
 * the initrd (if configured), then release all free memblock memory to
 * the buddy allocator. With HIGHMEM, also hand non-reserved highmem
 * pages to the page allocator.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/* With highmem, the last managed pfn is highend_pfn. */
	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
#else
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#endif
	/* First virtual address past the direct (lowmem) mapping. */
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif

	/* Hand everything memblock still considers free to the buddy allocator. */
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		/* FIXME not sure about */
		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
			free_highmem_page(page);
	}
#endif
}
111 | |
/*
 * Release the kernel's __init text/data back to the page allocator,
 * poisoning the freed pages with 0xff (-1) to catch late references.
 *
 * Fix: the original read "free_initmem_default(poison: -1)" — a C99
 * designated-initializer-style label is not valid in a function call
 * (an IDE inlay-hint artifact); the argument is passed positionally.
 */
void free_initmem(void)
{
	free_initmem_default(-1);
}
116 | |
117 | void pgd_init(unsigned long *p) |
118 | { |
119 | int i; |
120 | |
121 | for (i = 0; i < PTRS_PER_PGD; i++) |
122 | p[i] = __pa(invalid_pte_table); |
123 | |
124 | flush_tlb_all(); |
125 | local_icache_inv_all(NULL); |
126 | } |
127 | |
128 | void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn) |
129 | { |
130 | int i; |
131 | |
132 | for (i = 0; i < USER_PTRS_PER_PGD; i++) |
133 | swapper_pg_dir[i].pgd = __pa(invalid_pte_table); |
134 | |
135 | for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) |
136 | swapper_pg_dir[i].pgd = |
137 | __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD))); |
138 | |
139 | for (i = 0; i < PTRS_KERN_TABLE; i++) |
140 | set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL)); |
141 | |
142 | for (i = min_pfn; i < max_pfn; i++) |
143 | set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL)); |
144 | |
145 | flush_tlb_all(); |
146 | local_icache_inv_all(NULL); |
147 | |
148 | /* Setup page mask to 4k */ |
149 | write_mmu_pagemask(0); |
150 | |
151 | setup_pgd(swapper_pg_dir, 0); |
152 | } |
153 | |
154 | void __init fixrange_init(unsigned long start, unsigned long end, |
155 | pgd_t *pgd_base) |
156 | { |
157 | pgd_t *pgd; |
158 | pud_t *pud; |
159 | pmd_t *pmd; |
160 | pte_t *pte; |
161 | int i, j, k; |
162 | unsigned long vaddr; |
163 | |
164 | vaddr = start; |
165 | i = pgd_index(vaddr); |
166 | j = pud_index(address: vaddr); |
167 | k = pmd_index(address: vaddr); |
168 | pgd = pgd_base + i; |
169 | |
170 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
171 | pud = (pud_t *)pgd; |
172 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
173 | pmd = (pmd_t *)pud; |
174 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
175 | if (pmd_none(pmd: *pmd)) { |
176 | pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); |
177 | if (!pte) |
178 | panic(fmt: "%s: Failed to allocate %lu bytes align=%lx\n" , |
179 | __func__, PAGE_SIZE, |
180 | PAGE_SIZE); |
181 | |
182 | set_pmd(pmdp: pmd, pmd: __pmd(__pa(pte))); |
183 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); |
184 | } |
185 | vaddr += PMD_SIZE; |
186 | } |
187 | k = 0; |
188 | } |
189 | j = 0; |
190 | } |
191 | } |
192 | |
193 | void __init fixaddr_init(void) |
194 | { |
195 | unsigned long vaddr; |
196 | |
197 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; |
198 | fixrange_init(start: vaddr, end: vaddr + PMD_SIZE, swapper_pg_dir); |
199 | } |
200 | |
/*
 * vm_flags (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) -> page protection map,
 * consumed by the generic vm_get_page_prot() emitted below. Private
 * writable mappings get PAGE_READ (write faults trigger COW); only
 * shared writable mappings get PAGE_WRITE.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READ,
	[VM_WRITE]					= PAGE_READ,
	[VM_WRITE | VM_READ]				= PAGE_READ,
	[VM_EXEC]					= PAGE_READ,
	[VM_EXEC | VM_READ]				= PAGE_READ,
	[VM_EXEC | VM_WRITE]				= PAGE_READ,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READ,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC]				= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
};
DECLARE_VM_GET_PAGE_PROT
220 | |