// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI NMI support routines
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/clocksource.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived in the NMI handler.  If some CPUs do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to minimize UV Hub MMR accesses as much as possible, as
 * they disrupt the UV Hub's primary mission of directing NumaLink traffic
 * and can cause system problems.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running, which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short, as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */

/* Non-zero indicates newer SMM NMI handler present */
static unsigned long uvh_nmi_mmrx_supported;	/* UVH_EXTIO_INT0_BROADCAST */

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */

/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
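
/*
 * Hubless systems signal the NMI through a PCH GPIO pad (GPP_D pad 0)
 * rather than a hub MMR.  pch_base holds a kernel mapping of
 * PCH_PCR_GPIO_1_BASE (set up in uv_nmi_setup_hubless()), and the
 * offsets above select the pad-ownership, status, enable and pad-config
 * registers within that region.
 */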

static u64 *pch_base;
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

static atomic_t uv_nmi_kexec_failed;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2
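
/*
 * The first CPU into the NMI (the master) directs the others (the
 * slaves) through uv_nmi_slave_continue: SLAVE_CONTINUE tells waiting
 * KGDB slaves to call in, SLAVE_EXIT releases spinning slaves, and
 * SLAVE_CLEAR re-arms the handshake for the next NMI event.
 */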

/*
 * By default, all stack dumps go to the console and the log buffer.
 * Lower the level to send them to the log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)
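
/*
 * Wrapping the counters in local64_t keeps the updates NMI-safe, and
 * the param ops above expose each counter so that a read returns the
 * current count and any write resets it to zero.  With this code built
 * in, the counters should appear under /sys/module/uv_nmi/parameters/
 * (path assumed from KBUILD_MODNAME), e.g.:
 *
 *	cat /sys/module/uv_nmi/parameters/nmi_count	 # read
 *	echo 0 > /sys/module/uv_nmi/parameters/nmi_count # reset
 */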

static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);

/*
 * The following values allow tuning for large systems under heavy loading.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);

static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);

static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);

#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)

/* Valid NMI Actions */
enum action_t {
	nmi_act_kdump,
	nmi_act_dump,
	nmi_act_ips,
	nmi_act_kdb,
	nmi_act_kgdb,
	nmi_act_health,
	nmi_act_max
};

static const char * const actions[nmi_act_max] = {
	[nmi_act_kdump] = "kdump",
	[nmi_act_dump] = "dump",
	[nmi_act_ips] = "ips",
	[nmi_act_kdb] = "kdb",
	[nmi_act_kgdb] = "kgdb",
	[nmi_act_health] = "health",
};

static const char * const actions_desc[nmi_act_max] = {
	[nmi_act_kdump] = "do kernel crash dump",
	[nmi_act_dump] = "dump process stack for each cpu",
	[nmi_act_ips] = "dump Inst Ptr info for each cpu",
	[nmi_act_kdb] = "enter KDB (needs kgdboc= assignment)",
	[nmi_act_kgdb] = "enter KGDB (needs gdb target remote)",
	[nmi_act_health] = "check if CPUs respond to NMI",
};

static enum action_t uv_nmi_action = nmi_act_dump;

static int param_get_action(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", actions[uv_nmi_action]);
}

static int param_set_action(const char *val, const struct kernel_param *kp)
{
	int i, n = ARRAY_SIZE(actions);

	i = sysfs_match_string(actions, val);
	if (i >= 0) {
		uv_nmi_action = i;
		pr_info("UV: New NMI action:%s\n", actions[i]);
		return 0;
	}

	pr_err("UV: Invalid NMI action. Valid actions are:\n");
	for (i = 0; i < n; i++)
		pr_err("UV: %-8s - %s\n", actions[i], actions_desc[i]);

	return -EINVAL;
}

static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, enum action_t)

module_param_named(action, uv_nmi_action, action, 0644);
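
/*
 * The action can be changed at runtime, e.g. (path assumed as above):
 *
 *	echo kdump > /sys/module/uv_nmi/parameters/action
 *
 * Writing an invalid name lists the valid actions in the kernel log.
 */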

/* Setup which NMI support is present in the system */
static void uv_nmi_setup_mmrs(void)
{
	bool new_nmi_method_only = false;

	/* First determine arch specific MMRs to handshake with BIOS */
	if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) {	/* UV2,3,4 setup */
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";

		uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
		uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
		uvh_nmi_mmrx_req_shift = 62;

	} else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) {	/* UV5+ setup */
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";

		new_nmi_method_only = true;	/* Newer NMI always valid on UV5+ */
		uvh_nmi_mmrx_req = 0;		/* No request bit to clear */

	} else {
		pr_err("UV:%s:NMI support not available on this system\n", __func__);
		return;
	}

	/* Then find out if the new NMI is supported */
	if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported)) {
		if (uvh_nmi_mmrx_req)
			uv_write_local_mmr(uvh_nmi_mmrx_req,
					   1UL << uvh_nmi_mmrx_req_shift);
		nmi_mmr = uvh_nmi_mmrx;
		nmi_mmr_clear = uvh_nmi_mmrx_clear;
		nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
		pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}

/* Read NMI MMR and check if the NMI flag was set by the BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}

/*
 * UV hubless NMI handler functions
 */
static inline void uv_reassert_nmi(void)
{
	/* (from arch/x86/include/asm/mach_traps.h) */
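	/*
	 * Toggle the NMI mask: on PC-compatible systems, bit 7 of the
	 * CMOS/RTC index port (0x70) disables NMI delivery while set, so
	 * writing 0x8f and then 0x0f briefly masks and re-enables NMIs,
	 * re-asserting one that is still pending.  (Behavioral note
	 * assumed from standard PC/CMOS semantics.)
	 */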
	outb(0x8f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
	outb(0x0f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
}

static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);

	if (mask) {			/* OR in new data */
		int writed = (readd & ~mask) | data;

		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			  addr, readd, ~mask, data, writed);
		writel(writed, addr);
	} else if (readd & data) {	/* clear status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}

	(void)readl(addr);		/* flush write data */
}

static void uv_nmi_setup_hubless_intr(void)
{
	uv_pch_intr_now_enabled = uv_pch_intr_enable;

	uv_init_hubless_pch_io(
		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);

	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
		  uv_pch_intr_now_enabled ? "enabled" : "disabled");
}

static struct init_nmi {
	unsigned int offset;
	unsigned int mask;
	unsigned int data;
} init_nmi[] = {
	{	/* HOSTSW_OWN_GPP_D_0 */
		.offset = 0x84,
		.mask = 0x1,
		.data = 0x0,	/* ACPI Mode */
	},

/* Clear status: */
	{	/* GPI_INT_STS_GPP_D_0 */
		.offset = 0x104,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_GPE_STS_GPP_D_0 */
		.offset = 0x124,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_SMI_STS_GPP_D_0 */
		.offset = 0x144,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_NMI_STS_GPP_D_0 */
		.offset = 0x164,
		.mask = 0x0,
		.data = 0x1,	/* Clear Status */
	},

/* Disable interrupts: */
	{	/* GPI_INT_EN_GPP_D_0 */
		.offset = 0x114,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_GPE_EN_GPP_D_0 */
		.offset = 0x134,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_SMI_EN_GPP_D_0 */
		.offset = 0x154,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_NMI_EN_GPP_D_0 */
		.offset = 0x174,
		.mask = 0x1,
		.data = 0x0,	/* Disable interrupt generation */
	},

/* Setup GPP_D_0 Pad Config: */
	{	/* PAD_CFG_DW0_GPP_D_0 */
		.offset = 0x4c0,
		.mask = 0xffffffff,
		.data = 0x82020100,
/*
 *  31:30 Pad Reset Config (PADRSTCFG): = 2h  # PLTRST# (default)
 *
 *  29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
 *                                              # from RX buffer (default)
 *
 *  28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
 *
 *  26:25 RX Level/Edge Configuration (RXEVCFG):
 *                                     = 0h # Level
 *                                     = 1h # Edge
 *
 *  23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
 *
 *  20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
 *                                     = 0 # Routing does not cause peripheral IRQ
 *                                         # (we want an NMI not an IRQ)
 *
 *  19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
 *  18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
 *  17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
 *
 *  11:10 Pad Mode (PMODE1/0): = 0h # GPIO controls the Pad.
 *   9    GPIO RX Disable (GPIORXDIS):
 *                                     = 0 # Enable the input buffer (active low enable)
 *
 *   8    GPIO TX Disable (GPIOTXDIS):
 *                                     = 1 # Disable the output buffer, i.e. Hi-Z
 *
 *   1    GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.
 *   0    GPIO TX State (GPIOTXSTATE):
 *                                     = 0 # (Leave at default)
 */
	},

/* Pad Config DW1 */
	{	/* PAD_CFG_DW1_GPP_D_0 */
		.offset = 0x4c4,
		.mask = 0x3c00,
		.data = 0,	/* Termination = none (default) */
	},
};

static void uv_init_hubless_pch_d0(void)
{
	int i, read;

	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
	if (read != 0) {
		pr_info("UV: Hubless NMI already configured\n");
		return;
	}

	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
		uv_init_hubless_pch_io(init_nmi[i].offset,
				       init_nmi[i].mask,
				       init_nmi[i].data);
	}
}

static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{
	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
	int status = *pstat;

	hub_nmi->nmi_value = status;
	atomic_inc(&hub_nmi->read_mmr_count);

	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
		return 0;

	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
	(void)*pstat;			/* Flush write */

	return 1;
}

static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}

/*
 * If this is the first CPU into the NMI on this hub, set the hub_nmi
 * "in_nmi" and "owner" values and return true.  If it is also the first
 * CPU into the NMI on the whole system, set the global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
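	/*
	 * atomic_add_unless(v, 1, 1) increments only when the count is
	 * not already 1 and returns non-zero only for that first
	 * increment, so exactly one CPU per hub sees "first" as true.
	 */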
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}

/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;

	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);

			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;

			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {

			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/*
		 * Check if this BMC missed setting the MMR NMI flag, or if
		 * this is a UV hubless system where only the PCH owner can
		 * check the flag.
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);

	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}

/*
 * Need to reset the NMI MMR register, but only once per hub.  The hub
 * nmi_lock taken in uv_check_nmi() is released here by the owning CPU.
 */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}

/* Ping non-responding CPUs, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	__apic_send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for CPUs that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}

/* Loop waiting as CPUs enter the NMI handler; returns the number still out */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

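	/*
	 * Accounting sketch: n is the number of online CPUs and k the
	 * number known to have checked in.  Each pass clears arrived
	 * CPUs from uv_nmi_cpu_mask; if k stops advancing for
	 * uv_nmi_wait_count consecutive passes, give up and report the
	 * stragglers.
	 */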
	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPUs coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}

/* Wait until all slave CPUs have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPUs to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPUs are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		 atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}

/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}

/*
 * Dump this CPU's state.  If action was set to "kdump" and the crash_kexec
 * failed, then we provide "dump" as an alternate action.  Action "dump" now
 * also includes the show "ips" (instruction pointers) action, whereas the
 * action "ips" only displays instruction pointers for the non-idle CPUs.
 * This is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();

	if (current->pid != 0 || uv_nmi_action != nmi_act_ips)
		uv_nmi_dump_cpu_ip(cpu, regs);

	if (uv_nmi_action == nmi_act_dump) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}

	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}

/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}

/* Wait until all CPUs are ready to exit */
static void uv_nmi_sync_exit(int master)
{
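	/*
	 * Every CPU decrements the "in NMI" count; the master waits for
	 * it to reach zero before re-arming the slave-continue flag,
	 * while the slaves spin until the master clears that flag.
	 */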
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}

/* The current "health" check is to check which CPUs are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}

/* Walk through the CPU list and dump the state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			 uv_nmi_action == nmi_act_ips ? "IPs" : "processes",
			 atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}

static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{
	/* Check if the kdump kernel is loaded, for both main and secondary CPUs */
	if (!kexec_crash_image) {
		if (main)
			pr_err("UV: NMI error: kdump kernel not loaded\n");
		return;
	}

	/* Call crash to dump system state */
	if (main) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned\n");
		atomic_set(&uv_nmi_kexec_failed, 1);

	} else { /* secondary */

		/* If the kdump kernel fails, secondaries will exit this loop */
		while (atomic_read(&uv_nmi_kexec_failed) == 0) {

			/* Once shootdown of CPUs starts, they do not return */
			run_crash_ipi_callback(regs);

			mdelay(10);
		}
	}
}

#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action == nmi_act_kgdb)
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */

/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used.  See the KGDB documentation for further
 * information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				     &uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */

/*
 * UV NMI handler
 */
static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action == nmi_act_kdump) {
		uv_nmi_kdump(cpu, master, regs);

		/* Unexpected return, revert action to "dump" */
		if (master)
			uv_nmi_action = nmi_act_dump;
	}

	/* Pause as all CPUs enter the NMI handler */
	uv_nmi_wait(master);

	/* Process actions other than "kdump": */
	switch (uv_nmi_action) {
	case nmi_act_health:
		uv_nmi_action_health(cpu, regs, master);
		break;
	case nmi_act_ips:
	case nmi_act_dump:
		uv_nmi_dump_state(cpu, regs, master);
		break;
	case nmi_act_kdb:
	case nmi_act_kgdb:
		uv_call_kgdb_kdb(cpu, regs, master);
		break;
	default:
		if (master)
			pr_alert("UV: unknown NMI action: %d\n", uv_nmi_action);
		uv_nmi_sync_exit(master);
		break;
	}

	/* Clear per_cpu "in_nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (!cpumask_empty(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}

/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}

static void uv_register_nmi_notifier(void)
{
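	/* Primary handler goes on NMI_UNKNOWN to avoid the flood of perf NMIs */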
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all CPUs
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

/* Setup HUB NMI info */
static void __init uv_nmi_setup_common(bool hubbed)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu;

	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);

		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
			uv_hub_nmi_list[nid]->hub_present = hubbed;
			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}

/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}

/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		  pch_base, PCH_PCR_GPIO_1_BASE);
	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();
	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
			       STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();
	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: PCH NMI enabled\n");
}