/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
#include <asm/setup.h>

/*
 * LOONGSON-2 has a 4-entry ITLB which is a subset of the JTLB; LOONGSON-3 has
 * a 4-entry ITLB and a 4-entry DTLB which are subsets of the JTLB.
 * Unfortunately, the ITLB and DTLB are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

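/*
 * Flush the entire TLB of the current CPU. Two strategies, depending on
 * what the hardware offers:
 *
 *  - With the tlbinvf instruction and no wired entries, a single tlbinvf
 *    invalidates the whole VTLB, and one further tlbinvf per set clears
 *    the FTLB.
 *  - Otherwise, every non-wired entry is overwritten via an indexed TLB
 *    write with a unique EntryHi in unmapped kernel space
 *    (UNIQUE_ENTRYHI(idx)), so no two entries can conflict and none can
 *    ever match a real translation.
 */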
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating over
	 * the individual entries.
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

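/*
 * Flush the TLB entries covering [start, end) of a user address space.
 * Each MIPS TLB entry maps an even/odd pair of pages, so the range is
 * first widened to double-page boundaries. If the number of page pairs
 * is small relative to the TLB (an eighth of the entries with an FTLB,
 * half without), each pair is probed and, on a hit, overwritten with a
 * unique non-matching EntryHi; for larger ranges it is cheaper to retire
 * the whole address space by dropping its ASID/MMID.
 */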
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

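/*
 * Kernel-range variant of the above: the same probe-and-invalidate loop,
 * but kernel mappings are global, so no ASID juggling is needed and the
 * fallback for large ranges is a full local_flush_tlb_all().
 */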
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

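/*
 * Flush the single TLB entry (if any) that maps @page in @vma's address
 * space. The address is aligned down to its even/odd page pair, probed
 * under the mm's ASID or MMID, and overwritten with a unique EntryHi on
 * a hit.
 */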
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set, so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
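/*
 * Refill or update the TLB entry for @address after the page tables have
 * changed. The entry covering the address is probed; on a hit it is
 * rewritten in place (tlb_write_indexed), on a miss a new entry goes to
 * a random slot (tlb_write_random). Both EntryLo registers are reloaded
 * from the page tables, since one TLB entry always maps an even/odd pair
 * of pages; huge pages instead load both halves from a single PMD-level
 * entry under PM_HUGE_MASK.
 */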
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, *ptemap = NULL;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptemap = ptep = pte_offset_map(pmdp, address);
		/*
		 * update_mmu_cache() is called between pte_offset_map_lock()
		 * and pte_unmap_unlock(), so we can assume that ptep is not
		 * NULL here: and what should be done below if it were NULL?
		 */

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);

	if (ptemap)
		pte_unmap(ptemap);
	local_irq_restore(flags);
}

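/*
 * Install a permanent "wired" mapping: the Wired register is bumped so
 * that the new entry is exempt from random replacement, and it is also
 * skipped by local_flush_tlb_all(), which only rewrites entries from
 * num_wired_entries() upwards.
 */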
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

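/*
 * Probe for huge page support by writing PM_HUGE_MASK to the PageMask
 * register and reading it back: the register only retains page sizes the
 * MMU actually implements, so the value sticks exactly when huge pages
 * are supported. The result is cached after the first call.
 */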
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;

#ifndef CONFIG_64BIT
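/*
 * Temporary entries are allocated from the top of the TLB downwards
 * (temp_tlb_entry starts at tlbsize - 1, see r4k_tlb_configure()) and
 * fail with -ENOSPC once they would collide with the wired entries.
 * They are deliberately not wired, so a later flush of the whole TLB
 * wipes them again.
 */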
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}
#endif
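/*
 * "ntlb=N" limits the number of TLB entries available for random
 * replacement to N by marking the remaining entries wired; see
 * tlb_init() below. Values outside 2..tlbsize are ignored.
 */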
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 * - On R4600 1.7 the tlbp never hits for pages smaller than
	 *   the value in the c0_pagemask register.
	 * - The entire mm handling assumes the c0_pagemask register to
	 *   be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no-read and no-exec bits, and enable large
		 * physical addresses.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}

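/*
 * Per-CPU TLB initialisation. Besides the basic configuration above,
 * this honours the "ntlb=" boot parameter by wiring off all but the
 * requested number of entries, and builds the TLB refill exception
 * handler for this configuration.
 */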
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

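/*
 * A core can lose its TLB configuration while power-gated, so redo the
 * configuration when it comes back out of a low-power state (or when
 * entering one failed and the state may have been clobbered anyway).
 */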
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);