// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>

#include "ip27-common.h"

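/*
 * NODE_NUM_CPUS is currently hardwired to the compile-time maximum of
 * CPUS_PER_NODE (the processor slices of an IP27 node); the disabled
 * CNODE_NUM_CPUS() variant below would use the per-node count instead.
 */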
#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

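/*
 * Raise an NMI on one cpu: store into the per-slice NMI register of the
 * target node's HUB PI through the remote hub address space.
 */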
#define SEND_NMI(_nasid, _slice)	\
	REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)

typedef unsigned long machreg_t;

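/*
 * Taken with arch_spin_lock() directly because we run in NMI context,
 * where the generic spinlock wrappers (lockdep, preemption accounting)
 * are not safe to use.  Only the lock winner performs the dump.
 */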
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static void nmi_dump(void);

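/*
 * Register nmi_dump() in this slice's NMI vector area so the PROM jumps
 * to it when an NMI arrives.  call_addr_c holds the bitwise complement
 * of the handler address, letting the PROM sanity-check the entry.
 */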
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}

/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */

static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/*
	 * Get the pointer to this slice's saved register set: the PROM
	 * dumps each slice's registers at IP27_NMI_KREGS_OFFSET in
	 * node-local memory, one IP27_NMI_KREGS_CPU_SIZE block per slice,
	 * accessed here through an uncached address.
	 */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		 slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);
		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc   : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra    : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
	pr_emerg("Status: %08lx ", nr->sr);

	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId  : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr  : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}

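/*
 * Dump the HUB PI interrupt state for one slice: the per-slice interrupt
 * mask registers and the node-wide pending registers.
 */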
static void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	u64 mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
	pr_emerg("\n\n");
}

/*
 * Save the registers (in eframe format) and dump the hub interrupt
 * state for every cpu slice on this node.
 */
static void nmi_node_eframe_save(nasid_t nasid)
{
	int slice;

	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(nasid); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}

/*
 * Save the nmi cpu registers for all cpus in the system.
 */
static void nmi_eframes_save(void)
{
	nasid_t nasid;

	for_each_online_node(nasid)
		nmi_node_eframe_save(nasid);
}

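/*
 * Every cpu that receives the NMI ends up here.  One cpu wins nmi_lock,
 * waits for the others to check in, dumps the saved register frames and
 * hub interrupt state for the whole system, and then resets the machine
 * through the local hub.
 */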
static void nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow one cpu to proceed; the others spin here until the
	 * system is reset at the end of the dump.
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it one additional
	 * NMI.  This is for two reasons:
	 *	- sometimes an MMSC fails to NMI all cpus.
	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus.  Unfortunately, we don't know which cpus
	 *	  may be NMIed - it depends on how the site chooses to
	 *	  configure.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3
	 *	 seconds to send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}
		}
		udelay(10000);
	}
#else
	while (atomic_read(&nmied_cpus) != num_online_cpus())
		;
#endif


	/*
	 * Save the nmi cpu registers for all cpus in eframe format.
	 */
	nmi_eframes_save();
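	/*
	 * Finally assert a port/local reset on the local hub's network
	 * interface, resetting the machine once the dump is complete.
	 */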
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}