1 | /* |
2 | * Based on linux/arch/arm/mm/nommu.c |
3 | * |
4 | * ARM PMSAv7 supporting functions. |
5 | */ |
6 | |
7 | #include <linux/bitops.h> |
8 | #include <linux/memblock.h> |
9 | #include <linux/string.h> |
10 | |
11 | #include <asm/cacheflush.h> |
12 | #include <asm/cp15.h> |
13 | #include <asm/cputype.h> |
14 | #include <asm/mpu.h> |
15 | #include <asm/sections.h> |
16 | |
17 | #include "mm.h" |
18 | |
/*
 * One MPU region candidate produced while carving memblock ranges:
 * a power-of-two sized, naturally aligned window plus a bitmap of
 * disabled subregions masking the parts outside the requested range.
 */
struct region {
	phys_addr_t base;	/* aligned region base address */
	phys_addr_t size;	/* power-of-two region size */
	unsigned long subreg;	/* bitmap of *disabled* subregions */
};
24 | |
/* Scratch tables describing the regions that will cover RAM (and XIP ROM) */
static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

/* Hardware limits probed once in pmsav7_adjust_lowmem_bounds() */
static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

/* Probing helpers, defined after the register accessors below */
static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);
35 | |
36 | #ifndef CONFIG_CPU_V7M |
37 | |
38 | #define DRBAR __ACCESS_CP15(c6, 0, c1, 0) |
39 | #define IRBAR __ACCESS_CP15(c6, 0, c1, 1) |
40 | #define DRSR __ACCESS_CP15(c6, 0, c1, 2) |
41 | #define IRSR __ACCESS_CP15(c6, 0, c1, 3) |
42 | #define DRACR __ACCESS_CP15(c6, 0, c1, 4) |
43 | #define IRACR __ACCESS_CP15(c6, 0, c1, 5) |
44 | #define RNGNR __ACCESS_CP15(c6, 0, c2, 0) |
45 | |
/* Region number: selects which region the other accessors operate on */
static inline void rgnr_write(u32 v)
{
	write_sysreg(v, RNGNR);
}
51 | |
52 | /* Data-side / unified region attributes */ |
53 | |
/* Region access control register: permissions/attributes of current region */
static inline void dracr_write(u32 v)
{
	write_sysreg(v, DRACR);
}
59 | |
/* Region size register: size, subregion-disable bits and enable bit */
static inline void drsr_write(u32 v)
{
	write_sysreg(v, DRSR);
}
65 | |
/* Region base address register */
static inline void drbar_write(u32 v)
{
	write_sysreg(v, DRBAR);
}
71 | |
/* Read back DRBAR; used to probe the minimum supported region alignment */
static inline u32 drbar_read(void)
{
	return read_sysreg(DRBAR);
}
76 | /* Optional instruction-side region attributes */ |
77 | |
/* I-side Region access control register */
static inline void iracr_write(u32 v)
{
	write_sysreg(v, IRACR);
}
83 | |
/* I-side Region size register */
static inline void irsr_write(u32 v)
{
	write_sysreg(v, IRSR);
}
89 | |
/* I-side Region base address register */
static inline void irbar_write(u32 v)
{
	write_sysreg(v, IRBAR);
}
95 | |
/* Read back IRBAR; used to probe the I-side minimum region alignment */
static inline u32 irbar_read(void)
{
	return read_sysreg(IRBAR);
}
100 | |
101 | #else |
102 | |
/* v7-M: region number lives in the memory-mapped SCB RNR register */
static inline void rgnr_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}
107 | |
108 | /* Data-side / unified region attributes */ |
109 | |
110 | /* Region access control register */ |
111 | static inline void dracr_write(u32 v) |
112 | { |
113 | u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0); |
114 | |
115 | writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR); |
116 | } |
117 | |
118 | /* Region size register */ |
119 | static inline void drsr_write(u32 v) |
120 | { |
121 | u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16); |
122 | |
123 | writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR); |
124 | } |
125 | |
/* Region base address register (memory-mapped RBAR on v7-M) */
static inline void drbar_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
131 | |
/* Read back RBAR; used to probe the minimum supported region alignment */
static inline u32 drbar_read(void)
{
	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
136 | |
137 | /* ARMv7-M only supports a unified MPU, so I-side operations are nop */ |
138 | |
139 | static inline void iracr_write(u32 v) {} |
140 | static inline void irsr_write(u32 v) {} |
141 | static inline void irbar_write(u32 v) {} |
142 | static inline unsigned long irbar_read(void) {return 0;} |
143 | |
144 | #endif |
145 | |
146 | static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) |
147 | { |
148 | unsigned long subreg, bslots, sslots; |
149 | phys_addr_t abase = base & ~(size - 1); |
150 | phys_addr_t asize = base + size - abase; |
151 | phys_addr_t p2size = 1 << __fls(word: asize); |
152 | phys_addr_t bdiff, sdiff; |
153 | |
154 | if (p2size != asize) |
155 | p2size *= 2; |
156 | |
157 | bdiff = base - abase; |
158 | sdiff = p2size - asize; |
159 | subreg = p2size / PMSAv7_NR_SUBREGS; |
160 | |
161 | if ((bdiff % subreg) || (sdiff % subreg)) |
162 | return false; |
163 | |
164 | bslots = bdiff / subreg; |
165 | sslots = sdiff / subreg; |
166 | |
167 | if (bslots || sslots) { |
168 | int i; |
169 | |
170 | if (subreg < PMSAv7_MIN_SUBREG_SIZE) |
171 | return false; |
172 | |
173 | if (bslots + sslots > PMSAv7_NR_SUBREGS) |
174 | return false; |
175 | |
176 | for (i = 0; i < bslots; i++) |
177 | _set_bit(i, ®ion->subreg); |
178 | |
179 | for (i = 1; i <= sslots; i++) |
180 | _set_bit(PMSAv7_NR_SUBREGS - i, ®ion->subreg); |
181 | } |
182 | |
183 | region->base = abase; |
184 | region->size = p2size; |
185 | |
186 | return true; |
187 | } |
188 | |
189 | static int __init allocate_region(phys_addr_t base, phys_addr_t size, |
190 | unsigned int limit, struct region *regions) |
191 | { |
192 | int count = 0; |
193 | phys_addr_t diff = size; |
194 | int attempts = MPU_MAX_REGIONS; |
195 | |
196 | while (diff) { |
197 | /* Try cover region as is (maybe with help of subregions) */ |
198 | if (try_split_region(base, size, region: ®ions[count])) { |
199 | count++; |
200 | base += size; |
201 | diff -= size; |
202 | size = diff; |
203 | } else { |
204 | /* |
205 | * Maximum aligned region might overflow phys_addr_t |
206 | * if "base" is 0. Hence we keep everything below 4G |
207 | * until we take the smaller of the aligned region |
208 | * size ("asize") and rounded region size ("p2size"), |
209 | * one of which is guaranteed to be smaller than the |
210 | * maximum physical address. |
211 | */ |
212 | phys_addr_t asize = (base - 1) ^ base; |
213 | phys_addr_t p2size = (1 << __fls(word: diff)) - 1; |
214 | |
215 | size = asize < p2size ? asize + 1 : p2size + 1; |
216 | } |
217 | |
218 | if (count > limit) |
219 | break; |
220 | |
221 | if (!attempts) |
222 | break; |
223 | |
224 | attempts--; |
225 | } |
226 | |
227 | return count; |
228 | } |
229 | |
230 | /* MPU initialisation functions */ |
231 | void __init pmsav7_adjust_lowmem_bounds(void) |
232 | { |
233 | phys_addr_t specified_mem_size = 0, total_mem_size = 0; |
234 | phys_addr_t mem_start; |
235 | phys_addr_t mem_end; |
236 | phys_addr_t reg_start, reg_end; |
237 | unsigned int mem_max_regions; |
238 | bool first = true; |
239 | int num; |
240 | u64 i; |
241 | |
242 | /* Free-up PMSAv7_PROBE_REGION */ |
243 | mpu_min_region_order = __mpu_min_region_order(); |
244 | |
245 | /* How many regions are supported */ |
246 | mpu_max_regions = __mpu_max_regions(); |
247 | |
248 | mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions); |
249 | |
250 | /* We need to keep one slot for background region */ |
251 | mem_max_regions--; |
252 | |
253 | #ifndef CONFIG_CPU_V7M |
254 | /* ... and one for vectors */ |
255 | mem_max_regions--; |
256 | #endif |
257 | |
258 | #ifdef CONFIG_XIP_KERNEL |
259 | /* plus some regions to cover XIP ROM */ |
260 | num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR, |
261 | mem_max_regions, xip); |
262 | |
263 | mem_max_regions -= num; |
264 | #endif |
265 | |
266 | for_each_mem_range(i, ®_start, ®_end) { |
267 | if (first) { |
268 | phys_addr_t phys_offset = PHYS_OFFSET; |
269 | |
270 | /* |
271 | * Initially only use memory continuous from |
272 | * PHYS_OFFSET */ |
273 | if (reg_start != phys_offset) |
274 | panic(fmt: "First memory bank must be contiguous from PHYS_OFFSET" ); |
275 | |
276 | mem_start = reg_start; |
277 | mem_end = reg_end; |
278 | specified_mem_size = mem_end - mem_start; |
279 | first = false; |
280 | } else { |
281 | /* |
282 | * memblock auto merges contiguous blocks, remove |
283 | * all blocks afterwards in one go (we can't remove |
284 | * blocks separately while iterating) |
285 | */ |
286 | pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n" , |
287 | &mem_end, ®_start); |
288 | memblock_remove(base: reg_start, size: 0 - reg_start); |
289 | break; |
290 | } |
291 | } |
292 | |
293 | memset(mem, 0, sizeof(mem)); |
294 | num = allocate_region(base: mem_start, size: specified_mem_size, limit: mem_max_regions, regions: mem); |
295 | |
296 | for (i = 0; i < num; i++) { |
297 | unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS; |
298 | |
299 | total_mem_size += mem[i].size - subreg * hweight_long(w: mem[i].subreg); |
300 | |
301 | pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n" , |
302 | &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg); |
303 | } |
304 | |
305 | if (total_mem_size != specified_mem_size) { |
306 | pr_warn("Truncating memory from %pa to %pa (MPU region constraints)" , |
307 | &specified_mem_size, &total_mem_size); |
308 | memblock_remove(base: mem_start + total_mem_size, |
309 | size: specified_mem_size - total_mem_size); |
310 | } |
311 | } |
312 | |
313 | static int __init __mpu_max_regions(void) |
314 | { |
315 | /* |
316 | * We don't support a different number of I/D side regions so if we |
317 | * have separate instruction and data memory maps then return |
318 | * whichever side has a smaller number of supported regions. |
319 | */ |
320 | u32 dregions, iregions, mpuir; |
321 | |
322 | mpuir = read_cpuid_mputype(); |
323 | |
324 | dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; |
325 | |
326 | /* Check for separate d-side and i-side memory maps */ |
327 | if (mpuir & MPUIR_nU) |
328 | iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; |
329 | |
330 | /* Use the smallest of the two maxima */ |
331 | return min(dregions, iregions); |
332 | } |
333 | |
/* Nonzero when the MPU has separate (non-unified) I-side and D-side maps */
static int __init mpu_iside_independent(void)
{
	/* MPUIR.nU specifies whether there is *not* a unified memory map */
	return read_cpuid_mputype() & MPUIR_nU;
}
339 | |
340 | static int __init __mpu_min_region_order(void) |
341 | { |
342 | u32 drbar_result, irbar_result; |
343 | |
344 | /* We've kept a region free for this probing */ |
345 | rgnr_write(v: PMSAv7_PROBE_REGION); |
346 | isb(); |
347 | /* |
348 | * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum |
349 | * region order |
350 | */ |
351 | drbar_write(v: 0xFFFFFFFC); |
352 | drbar_result = irbar_result = drbar_read(); |
353 | drbar_write(v: 0x0); |
354 | /* If the MPU is non-unified, we use the larger of the two minima*/ |
355 | if (mpu_iside_independent()) { |
356 | irbar_write(v: 0xFFFFFFFC); |
357 | irbar_result = irbar_read(); |
358 | irbar_write(v: 0x0); |
359 | } |
360 | isb(); /* Ensure that MPU region operations have completed */ |
361 | /* Return whichever result is larger */ |
362 | |
363 | return __ffs(max(drbar_result, irbar_result)); |
364 | } |
365 | |
366 | static int __init mpu_setup_region(unsigned int number, phys_addr_t start, |
367 | unsigned int size_order, unsigned int properties, |
368 | unsigned int subregions, bool need_flush) |
369 | { |
370 | u32 size_data; |
371 | |
372 | /* We kept a region free for probing resolution of MPU regions*/ |
373 | if (number > mpu_max_regions |
374 | || number >= MPU_MAX_REGIONS) |
375 | return -ENOENT; |
376 | |
377 | if (size_order > 32) |
378 | return -ENOMEM; |
379 | |
380 | if (size_order < mpu_min_region_order) |
381 | return -ENOMEM; |
382 | |
383 | /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ |
384 | size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN; |
385 | size_data |= subregions << PMSAv7_RSR_SD; |
386 | |
387 | if (need_flush) |
388 | flush_cache_all(); |
389 | |
390 | dsb(); /* Ensure all previous data accesses occur with old mappings */ |
391 | rgnr_write(v: number); |
392 | isb(); |
393 | drbar_write(v: start); |
394 | dracr_write(v: properties); |
395 | isb(); /* Propagate properties before enabling region */ |
396 | drsr_write(v: size_data); |
397 | |
398 | /* Check for independent I-side registers */ |
399 | if (mpu_iside_independent()) { |
400 | irbar_write(v: start); |
401 | iracr_write(v: properties); |
402 | isb(); |
403 | irsr_write(v: size_data); |
404 | } |
405 | isb(); |
406 | |
407 | /* Store region info (we treat i/d side the same, so only store d) */ |
408 | mpu_rgn_info.rgns[number].dracr = properties; |
409 | mpu_rgn_info.rgns[number].drbar = start; |
410 | mpu_rgn_info.rgns[number].drsr = size_data; |
411 | |
412 | mpu_rgn_info.used++; |
413 | |
414 | return 0; |
415 | } |
416 | |
417 | /* |
418 | * Set up default MPU regions, doing nothing if there is no MPU |
419 | */ |
420 | void __init pmsav7_setup(void) |
421 | { |
422 | int i, region = 0, err = 0; |
423 | |
424 | /* Setup MPU (order is important) */ |
425 | |
426 | /* Background */ |
427 | err |= mpu_setup_region(region++, 0, 32, |
428 | PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW, |
429 | 0, false); |
430 | |
431 | #ifdef CONFIG_XIP_KERNEL |
432 | /* ROM */ |
433 | for (i = 0; i < ARRAY_SIZE(xip); i++) { |
434 | /* |
435 | * In case we overwrite RAM region we set earlier in |
436 | * head-nommu.S (which is cachable) all subsequent |
437 | * data access till we setup RAM bellow would be done |
438 | * with BG region (which is uncachable), thus we need |
439 | * to clean and invalidate cache. |
440 | */ |
441 | bool need_flush = region == PMSAv7_RAM_REGION; |
442 | |
443 | if (!xip[i].size) |
444 | continue; |
445 | |
446 | err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), |
447 | PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL, |
448 | xip[i].subreg, need_flush); |
449 | } |
450 | #endif |
451 | |
452 | /* RAM */ |
453 | for (i = 0; i < ARRAY_SIZE(mem); i++) { |
454 | if (!mem[i].size) |
455 | continue; |
456 | |
457 | err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), |
458 | PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL, |
459 | mem[i].subreg, false); |
460 | } |
461 | |
462 | /* Vectors */ |
463 | #ifndef CONFIG_CPU_V7M |
464 | err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), |
465 | PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL, |
466 | 0, false); |
467 | #endif |
468 | if (err) { |
469 | panic(fmt: "MPU region initialization failure! %d" , err); |
470 | } else { |
471 | pr_info("Using ARMv7 PMSA Compliant MPU. " |
472 | "Region independence: %s, Used %d of %d regions\n" , |
473 | mpu_iside_independent() ? "Yes" : "No" , |
474 | mpu_rgn_info.used, mpu_max_regions); |
475 | } |
476 | } |
477 | |