// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

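/*
 * Defining __EXTERN_INLINE as plain `inline' before these two includes
 * is presumably what makes this file emit the out-of-line bodies of
 * the extern-inline helpers declared in mmu_context.h and tlbflush.h,
 * so the rest of the kernel has real definitions to link against.
 */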
#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif
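/*
 * On UP this single counter drives ASN allocation; the SMP build keeps
 * an equivalent counter per CPU instead (cpu_last_asn in
 * asm/mmu_context.h), so no global is needed there.
 */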

void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

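	/*
	 * Only the low bits of the new context value are the hardware
	 * ASN proper; the bits above HARDWARE_ASN_MASK carry the ASN
	 * version counter, which is why the value is masked before it
	 * is loaded into the PCB.
	 */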
	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])
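/*
 * The index arithmetic mirrors the pt_regs layout: r0-r8 are the first
 * nine words of pt_regs; r9-r15 live in the save block just below
 * `regs' (so (r)-16 yields regs[-7]..regs[-1]); r16-r18 are saved by
 * PALcode near the end of pt_regs, at (r)+10; and r19-r28 sit at
 * regs[9..18], i.e. (r)-10.
 */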

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
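	/* In the instruction encoding, bits <25:21> hold the Ra
	   destination register ($31 is 0x1f) and bits <31:26> the
	   opcode; each set bit in the 0x30f00001400 mask selects one
	   of the eight load opcodes listed below.  */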
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || faulthandler_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);

	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	mmap_read_unlock(mm);

 bad_area_nosemaphore:
	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	make_task_dead(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	mmap_read_unlock(mm);
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	force_sig_fault(SIGSEGV, si_code, (void __user *) address);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}