// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/vm_mmu.h>

/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
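
/*
 * i.e. _end is a link-time virtual address; subtracting PAGE_OFFSET and
 * adding PHYS_OFFSET converts it back to a physical address, and PFN_UP
 * rounds up to the first whole page frame past the image.
 */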

unsigned long bootmem_lastpg;	/* Should be set by platform code */
unsigned long __phys_offset;	/* physical kernel offset >> 12 */

/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;
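/* (each segment table entry maps 4 MB, so 0x303 << 22 == 0xc0c00000) */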

/* PFNs marking the start and end of the high memory range */
unsigned long highstart_pfn, highend_pfn;

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;
/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over.  Actual bound in years needs to be
 * calculated to confirm.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;

/*
 * mem_init - initializes memory
 *
 * Releases the memblock-managed boot memory to the page allocator
 * and records the kernel segment table's physical address in the
 * init_mm context.
 */
void __init mem_init(void)
{
	/* Declared in <linux/memblock.h>; hands boot memory to the buddy allocator. */
	memblock_free_all();

	/*
	 * To-Do: someone somewhere should wipe out the bootmem map
	 * after we're done?
	 */

	/*
	 * This can be moved to some more virtual-memory-specific
	 * initialization hook at some point.  Set the init_mm
	 * descriptor's "context" value to point to the initial
	 * kernel segment table's physical address.
	 */
	init_mm.context.ptbase = __pa(init_mm.pgd);
}

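/*
 * Flush the page backing the given PTE out of the data cache and
 * invalidate the matching instruction-cache lines, so code written
 * through a data mapping is visible to instruction fetch.
 */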
void sync_icache_dcache(pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	__vmcache_idsync(addr, PAGE_SIZE);
}

/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
static void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };

	/*
	 * This is not particularly well documented anywhere, but
	 * give ZONE_NORMAL all the memory, including the big holes
	 * left by the kernel+bootmem_map, which are already marked
	 * reserved in the bootmem_map; free_area_init should see
	 * those bits and adjust accordingly.
	 */

	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);  /* sets up the zonelists and mem_map */

	/*
	 * Start of high memory area.  Will probably need something more
	 * fancy if we... get more fancy.
	 */
	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}

#ifndef DMA_RESERVE
#define DMA_RESERVE (4)
#endif

#define DMA_CHUNKSIZE (1<<22)
#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
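/* i.e. DMA_RESERVE chunks of 4 MB each: 16 MB with the default of 4 */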

/*
 * Pick out the memory size.  We look for mem=size,
 * where size is "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	unsigned long size;
	char *endp;

	size = memparse(p, &endp);

	bootmem_lastpg = PFN_DOWN(size);
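	/* e.g. "mem=256M" with 4 KB pages gives bootmem_lastpg = 0x10000 */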

	return 0;
}
early_param("mem", early_mem);

size_t hexagon_coherent_pool_size = (size_t) DMA_RESERVED_BYTES;

void __init setup_arch_memory(void)
{
	/* XXX Todo: this probably should be cleaned up */
	u32 *segtable = (u32 *) &swapper_pg_dir[0];
	u32 *segtable_end;

	/*
	 * Set up boot memory allocator
	 *
	 * The Gorman book also talks about these functions.
	 * This needs to change for highmem setups.
	 */

	/* Prior to this, bootmem_lastpg is actually the memory size in pages */
	bootmem_lastpg += ARCH_PFN_OFFSET;

	/* Memory size needs to be a multiple of 16M */
	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
		~((BIG_KERNEL_PAGE_SIZE) - 1));
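	/*
	 * i.e. round the size in bytes down to a 16 MB
	 * (BIG_KERNEL_PAGE_SIZE) boundary, then convert back to a PFN.
	 */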

	memblock_add(PHYS_OFFSET,
		     (bootmem_lastpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);

	/* Reserve kernel text/data/bss */
	memblock_reserve(PHYS_OFFSET,
			 (bootmem_startpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
	/*
	 * Reserve the top DMA_RESERVED_BYTES of RAM for uncached DMA
	 * memory allocation
	 */
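	/* 16 MB default reservation = 0x1000 page frames with 4 KB pages */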
	max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
	min_low_pfn = ARCH_PFN_OFFSET;
	memblock_reserve(PFN_PHYS(max_low_pfn), DMA_RESERVED_BYTES);

	printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
	printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
	printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
	printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);

	/*
	 * The default VM page tables (will be) populated with
	 * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
	 * higher than what we have memory for.
	 */

	/* this is pointer arithmetic; each entry covers 4 MB */
	segtable = segtable + (PAGE_OFFSET >> 22);
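	/* with PAGE_OFFSET == 0xc0000000 this lands on entry 0x300 */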

	/* this actually only goes to the end of the first gig */
	segtable_end = segtable + (1<<(30-22));

	/*
	 * Move forward to the start of empty pages; take into account
	 * phys_offset shift.
	 */

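	/* each 4 MB entry maps 1 << (22 - PAGE_SHIFT) page frames */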
	segtable += (bootmem_lastpg - ARCH_PFN_OFFSET) >> (22 - PAGE_SHIFT);
	{
		int i;

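		/*
		 * Remap the DMA_RESERVE entries just below the cutoff as
		 * uncached (__HEXAGON_C_UNC), matching the memblock
		 * reservation of the top of RAM made above.
		 */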
		for (i = 1; i <= DMA_RESERVE; i++)
			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
				| __HEXAGON_C_UNC << 6
				| __HVM_PDE_S_4MB);
	}

	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
		segtable_end);
	while (segtable < (segtable_end - 8))
		*(segtable++) = __HVM_PDE_S_INVALID;
	/* stop the pointer at the device I/O 4 MB page */

	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
		segtable);

#if 0
	/* Other half of the early device table from vm_init_segtable. */
	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
		(unsigned long) _K_init_devicetable - PAGE_OFFSET);
	*segtable = ((u32) (unsigned long) _K_init_devicetable - PAGE_OFFSET) |
		__HVM_PDE_S_4KB;
	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

	/*
	 * The bootmem allocator seemingly just lives to feed memory
	 * to the paging system
	 */
	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
	paging_init();  /* See Gorman Book, 2.3 */

	/*
	 * At this point the page allocator is set up, but no pages have
	 * been handed to it yet (just as with the bootmem allocator);
	 * they are freed into it by mem_init(), which start_kernel()
	 * calls later in the boot process.
	 */
}

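/*
 * Static encodings for the 16 combinations of VM_{SHARED,EXEC,WRITE,READ}.
 * Note that the non-shared writable entries deliberately omit
 * _PAGE_WRITE: the first store to a private writable mapping faults,
 * letting the kernel perform copy-on-write.
 */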
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   CACHEDEF),
	[VM_READ]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_READ | CACHEDEF),
	[VM_WRITE]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   CACHEDEF),
	[VM_WRITE | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_READ | CACHEDEF),
	[VM_EXEC]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | CACHEDEF),
	[VM_EXEC | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | _PAGE_READ |
								   CACHEDEF),
	[VM_EXEC | VM_WRITE]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | CACHEDEF),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | _PAGE_READ |
								   CACHEDEF),
	[VM_SHARED]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   CACHEDEF),
	[VM_SHARED | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_READ | CACHEDEF),
	[VM_SHARED | VM_WRITE]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_WRITE | CACHEDEF),
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_READ | _PAGE_WRITE |
								   CACHEDEF),
	[VM_SHARED | VM_EXEC]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | CACHEDEF),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | _PAGE_READ |
								   CACHEDEF),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_EXECUTE | _PAGE_WRITE |
								   CACHEDEF),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_PAGE_PRESENT | _PAGE_USER |
								   _PAGE_READ | _PAGE_EXECUTE |
								   _PAGE_WRITE | CACHEDEF)
};
DECLARE_VM_GET_PAGE_PROT