// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>

/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_E500
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
		.enc = BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift = 21,
		.enc = BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift = 22,
		.enc = BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift = 24,
		.enc = BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift = 26,
		.enc = BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift = 28,
		.enc = BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift = 30,
		.enc = BOOK3E_PAGESZ_1GB,
	},
};

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif

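/*
 * On 8xx only the shift is needed: there is no Book3E-style size encoding
 * (mmu_get_tsize() is unused here) and no indirect entries, so .enc and
 * .ind stay zero.
 */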
#ifdef CONFIG_PPC_8xx
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
	},
	[MMU_PAGE_16K] = {
		.shift = 14,
	},
	[MMU_PAGE_512K] = {
		.shift = 19,
	},
	[MMU_PAGE_8M] = {
		.shift = 23,
	},
};
#endif

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */


/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

#ifndef CONFIG_PPC_8xx
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

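/*
 * Flush a single page from the local CPU's TLB, with the translation size
 * (tsize) and direct/indirect kind (ind) supplied by the caller.  A NULL
 * mm is treated as PID 0, i.e. a kernel translation.
 */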
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

void local_flush_tlb_page_psize(struct mm_struct *mm,
				unsigned long vmaddr, int psize)
{
	__local_flush_tlb_page(mm, vmaddr, mmu_get_tsize(psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page_psize);

#endif

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

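/*
 * IPI callback: invalidate all local TLB entries for the given PID.  A
 * NULL param means PID 0, which flush_tlb_kernel_range() uses to flush
 * kernel translations on every CPU.
 */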
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

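/* IPI callback: invalidate one local TLB entry described by *param */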
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

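/*
 * Flush one page from the TLBs of every CPU the mm has run on: via a
 * broadcast tlbivax when the hardware supports it, otherwise by sending
 * an IPI to the other CPUs, then invalidating locally.
 */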
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
#ifndef CONFIG_PPC_8xx
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
#endif

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync; but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

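/* mmu_gather tear-down hook: we simply flush the whole mm */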
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

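/*
 * Probe the TLB configuration registers to work out which of the page
 * sizes in mmu_psize_defs the hardware actually supports, directly
 * and/or indirectly, and prune the table accordingly.
 */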
static void __init setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_E500
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void __init setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_E500
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_E500
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (IS_ENABLED(CONFIG_PPC_47x) && IS_ENABLED(CONFIG_SMP) &&
	    of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
}
#endif /* CONFIG_PPC64 */