// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) 2005-2016 Advanced Micro Devices, Inc.
 *
 * Written by Jacob Shin - AMD, Inc.
 * Maintained by: Borislav Petkov <bp@alien8.de>
 *
 * All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "internal.h"

#define NR_BLOCKS 5
#define THRESHOLD_MAX 0xFFF
#define INT_TYPE_APIC 0x00020000
#define MASK_VALID_HI 0x80000000
#define MASK_CNTP_HI 0x40000000
#define MASK_LOCKED_HI 0x20000000
#define MASK_LVTOFF_HI 0x00F00000
#define MASK_COUNT_EN_HI 0x00080000
#define MASK_INT_TYPE_HI 0x00060000
#define MASK_OVERFLOW_HI 0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO 0xFF000000
#define MCG_XBLK_ADDR 0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR 0xC0000410
#define MASK_DEF_LVTOFF 0x000000F0
#define MASK_DEF_INT_TYPE 0x00000006
#define DEF_LVT_OFF 0x2
#define DEF_INT_TYPE_APIC 0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF 0xF000

static bool thresholding_irq_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

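/* Pack a (HWID, McaType) pair into a single u32 to match against MCA_IPID. */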
#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))

struct smca_hwid {
	unsigned int bank_type;	/* Use with smca_bank_types for easy indexing. */
	u32 hwid_mcatype;	/* (hwid,mcatype) tuple */
};

struct smca_bank {
	const struct smca_hwid *hwid;
	u32 id;			/* Value of MCA_IPID[InstanceId]. */
	u8 sysfs_id;		/* Value used for sysfs name. */
};

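/*
 * Per-CPU view of the detected SMCA banks and a running count of banks per
 * type; both are populated by smca_configure() at CPU init time.
 */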
static DEFINE_PER_CPU_READ_MOSTLY(struct smca_bank[MAX_NR_BANKS], smca_banks);
static DEFINE_PER_CPU_READ_MOSTLY(u8[N_SMCA_BANK_TYPES], smca_bank_counts);

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS ... SMCA_LS_V2]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]			= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]			= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]			= { "decode_unit",	"Decode Unit" },
	[SMCA_RESERVED]			= { "reserved",		"Reserved" },
	[SMCA_EX]			= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]			= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]			= { "l3_cache",		"L3 Cache" },
	[SMCA_CS ... SMCA_CS_V2]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]			= { "pie",		"Power, Interrupts, etc." },

	/* UMC v2 is separate because both of them can exist in a single system. */
	[SMCA_UMC]			= { "umc",		"Unified Memory Controller" },
	[SMCA_UMC_V2]			= { "umc_v2",		"Unified Memory Controller v2" },
	[SMCA_PB]			= { "param_block",	"Parameter Block" },
	[SMCA_PSP ... SMCA_PSP_V2]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU ... SMCA_SMU_V2]	= { "smu",		"System Management Unit" },
	[SMCA_MP5]			= { "mp5",		"Microprocessor 5 Unit" },
	[SMCA_MPDMA]			= { "mpdma",		"MPDMA Unit" },
	[SMCA_NBIO]			= { "nbio",		"Northbridge IO Unit" },
	[SMCA_PCIE ... SMCA_PCIE_V2]	= { "pcie",		"PCI Express Unit" },
	[SMCA_XGMI_PCS]			= { "xgmi_pcs",		"Ext Global Memory Interconnect PCS Unit" },
	[SMCA_NBIF]			= { "nbif",		"NBIF Unit" },
	[SMCA_SHUB]			= { "shub",		"System Hub Unit" },
	[SMCA_SATA]			= { "sata",		"SATA Unit" },
	[SMCA_USB]			= { "usb",		"USB Unit" },
	[SMCA_GMI_PCS]			= { "gmi_pcs",		"Global Memory Interconnect PCS Unit" },
	[SMCA_XGMI_PHY]			= { "xgmi_phy",		"Ext Global Memory Interconnect PHY Unit" },
	[SMCA_WAFL_PHY]			= { "wafl_phy",		"WAFL PHY Unit" },
	[SMCA_GMI_PHY]			= { "gmi_phy",		"Global Memory Interconnect PHY Unit" },
};

static const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank)
{
	struct smca_bank *b;

	if (bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &per_cpu(smca_banks, cpu)[bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}
EXPORT_SYMBOL_GPL(smca_get_bank_type);

static const struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype } */

	/* Reserved type */
	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0) },

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0) },
	{ SMCA_LS_V2,	 HWID_MCATYPE(0xB0, 0x10) },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1) },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2) },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3) },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5) },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6) },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7) },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0) },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1) },
	{ SMCA_CS_V2,	 HWID_MCATYPE(0x2E, 0x2) },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0) },
	{ SMCA_UMC_V2,	 HWID_MCATYPE(0x96, 0x1) },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0) },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0) },
	{ SMCA_PSP_V2,	 HWID_MCATYPE(0xFF, 0x1) },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0) },
	{ SMCA_SMU_V2,	 HWID_MCATYPE(0x01, 0x1) },

	/* Microprocessor 5 Unit MCA type */
	{ SMCA_MP5,	 HWID_MCATYPE(0x01, 0x2) },

	/* MPDMA MCA type */
	{ SMCA_MPDMA,	 HWID_MCATYPE(0x01, 0x3) },

	/* Northbridge IO Unit MCA type */
	{ SMCA_NBIO,	 HWID_MCATYPE(0x18, 0x0) },

	/* PCI Express Unit MCA type */
	{ SMCA_PCIE,	 HWID_MCATYPE(0x46, 0x0) },
	{ SMCA_PCIE_V2,	 HWID_MCATYPE(0x46, 0x1) },

	{ SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
	{ SMCA_NBIF,	 HWID_MCATYPE(0x6C, 0x0) },
	{ SMCA_SHUB,	 HWID_MCATYPE(0x80, 0x0) },
	{ SMCA_SATA,	 HWID_MCATYPE(0xA8, 0x0) },
	{ SMCA_USB,	 HWID_MCATYPE(0xAA, 0x0) },
	{ SMCA_GMI_PCS,	 HWID_MCATYPE(0x241, 0x0) },
	{ SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
	{ SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
	{ SMCA_GMI_PHY,	 HWID_MCATYPE(0x269, 0x0) },
};

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits, i.e. at most 8 hex characters. Make sure
 * MAX_MCATYPE_NAME_LEN is greater than 8 plus 1 (for the underscore) plus
 * the length of the longest type name.
 */
#define MAX_MCATYPE_NAME_LEN 30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);

/*
 * A list of the banks enabled on each logical CPU. Controls which respective
 * descriptors to initialize later in mce_threshold_create_device().
 */
static DEFINE_PER_CPU(u64, bank_map);

/* Map of banks that have more than MCA_MISC0 available. */
static DEFINE_PER_CPU(u64, smca_misc_banks_map);

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
{
	u32 low, high;

	/*
	 * For SMCA enabled processors, BLKPTR field of the first MISC register
	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
	 */
	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
		return;

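	/* Only MCAX-capable banks carry the extended MISC1-4 register set. */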
	if (!(low & MCI_CONFIG_MCAX))
		return;

	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high))
		return;

	if (low & MASK_BLKPTR_LO)
		per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank);

}

static void smca_configure(unsigned int bank, unsigned int cpu)
{
	u8 *bank_counts = this_cpu_ptr(smca_bank_counts);
	const struct smca_hwid *s_hwid;
	unsigned int i, hwid_mcatype;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(low & BIT(8));

		wrmsr(smca_config, low, high);
	}

	smca_set_misc_banks_map(bank, cpu);

	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

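	/* MCA_IPID[63:48] is the McaType and MCA_IPID[43:32] the HardwareID. */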
	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];

		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			this_cpu_ptr(smca_banks)[bank].hwid = s_hwid;
			this_cpu_ptr(smca_banks)[bank].id = low;
			this_cpu_ptr(smca_banks)[bank].sysfs_id = bank_counts[s_hwid->bank_type]++;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block *b;
	int reset;
	int set_lvt_off;
	int lvt_off;
	u16 old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	/* sysfs write might race against an offline operation */
	if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
		return;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

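	/*
	 * The hardware counter counts up from its preload value and raises the
	 * threshold interrupt when it passes THRESHOLD_MAX, so preloading
	 * (THRESHOLD_MAX - threshold_limit) arms it for 'threshold_limit' more
	 * errors.
	 */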
	if (tr->reset) {		/* reset err count and overflow bit */
		hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		     (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
				(tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		     (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit = THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

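	/* The BIOS-programmed deferred error LVT offset lives in MSR_CU_DEF_ERR[7:4]. */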
	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 smca_get_block_address(unsigned int bank, unsigned int block,
				  unsigned int cpu)
{
	if (!block)
		return MSR_AMD64_SMCA_MCx_MISC(bank);

	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank)))
		return 0;

	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block,
			     unsigned int cpu)
{
	u32 addr = 0, offset = 0;

	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
		return addr;

	if (mce_flags.smca)
		return smca_get_block_address(bank, block, cpu);

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = mca_msr_reg(bank, MCA_MISC);
		break;
	case 1:
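		/*
		 * BLKPTR is MCx_MISC0[31:24]. Shifting right by 21 rather than
		 * 24 scales the block pointer by 8, the number of MSRs per
		 * extended block.
		 */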
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= BIT_ULL(bank);

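	/* A temporary, on-stack descriptor is enough to program the block's MSR. */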
	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);
	if (offset == new)
		thresholding_irq_en = true;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

bool amd_filter_mce(struct mce *m)
{
	enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* See Family 17h Models 10h-2Fh Erratum #1114. */
	if (c->x86 == 0x17 &&
	    c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
	    bank_type == SMCA_IF && XEC(m->status, 0x3f) == 10)
		return true;

	/* NB GART TLB error reporting is disabled by default. */
	if (c->x86 < 0x17) {
		if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5)
			return true;
	}

	return false;
}

/*
 * Turn off thresholding banks for the following conditions:
 * - MC4_MISC thresholding is not supported on Family 0x15.
 * - Prevent possible spurious interrupts from the IF bank on Family 0x17
 *   Models 0x10-0x2F due to Erratum #1114.
 */
static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
	int i, num_msrs;
	u64 hwcr;
	bool need_toggle;
	u32 msrs[NR_BLOCKS];

	if (c->x86 == 0x15 && bank == 4) {
		msrs[0] = 0x00000413; /* MC4_MISC0 */
		msrs[1] = 0xc0000408; /* MC4_MISC1 */
		num_msrs = 2;
	} else if (c->x86 == 0x17 &&
		   (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {

		if (smca_get_bank_type(smp_processor_id(), bank) != SMCA_IF)
			return;

		msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
		num_msrs = 1;
	} else {
		return;
	}

	rdmsrl(MSR_K7_HWCR, hwcr);

	/* McStatusWrEn has to be set */
	need_toggle = !(hwcr & BIT(18));
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

	/* Clear CntP bit safely */
	for (i = 0; i < num_msrs; i++)
		msr_clear_bit(msrs[i], 62);

	/* restore old settings */
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int bank, block, cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	int offset = -1;

	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		disable_err_thresholding(c, bank);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block, cpu);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

/*
 * DRAM ECC errors are reported in the Northbridge (bank 4) with
 * Extended Error Code 8.
 */
static bool legacy_mce_is_memory_error(struct mce *m)
{
	return m->bank == 4 && XEC(m->status, 0x1f) == 8;
}

/*
 * DRAM ECC errors are reported in Unified Memory Controllers with
 * Extended Error Code 0.
 */
static bool smca_mce_is_memory_error(struct mce *m)
{
	enum smca_bank_types bank_type;

	if (XEC(m->status, 0x3f))
		return false;

	bank_type = smca_get_bank_type(m->extcpu, m->bank);

	return bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2;
}

bool amd_mce_is_memory_error(struct mce *m)
{
	if (mce_flags.smca)
		return smca_mce_is_memory_error(m);
	else
		return legacy_mce_is_memory_error(m);
}

/*
 * AMD systems do not have an explicit indicator that the value in MCA_ADDR is
 * a system physical address. Therefore, individual cases need to be detected.
 * Future cases and checks will be added as needed.
 *
 * 1) General case
 *	a) Assume address is not usable.
 * 2) Poison errors
 *	a) Indicated by MCA_STATUS[43]: poison. Defined for all banks except legacy
 *	   northbridge (bank 4).
 *	b) Refers to poison consumption in the core. Does not include "no action",
 *	   "action optional", or "deferred" error severities.
 *	c) Will include a usable address so that immediate action can be taken.
 * 3) Northbridge DRAM ECC errors
 *	a) Reported in legacy bank 4 with extended error code (XEC) 8.
 *	b) MCA_STATUS[43] is *not* defined as poison in legacy bank 4. Therefore,
 *	   this bit should not be checked.
 *
 * NOTE: SMCA UMC memory errors fall into case #1.
 */
bool amd_mce_usable_address(struct mce *m)
{
	/* Check special northbridge case 3) first. */
	if (!mce_flags.smca) {
		if (legacy_mce_is_memory_error(m))
			return true;
		else if (m->bank == 4)
			return false;
	}

	/* Check poison bit for all other bank types. */
	if (m->status & MCI_STATUS_POISON)
		return true;

	/* Assume address is not usable for all others. */
	return false;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		smca_extract_err_addr(&m);
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
{
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	apic_eoi();
}

/*
 * Returns true if the logged error is deferred. False, otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

static bool _log_error_deferred(unsigned int bank, u32 misc)
{
	if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
			     mca_msr_reg(bank, MCA_ADDR), misc))
		return false;

	/*
	 * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers.
	 * Return true here to avoid accessing these registers.
	 */
	if (!mce_flags.smca)
		return true;

	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
	return true;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
	if (_log_error_deferred(bank, 0))
		return;

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_deferred(bank, misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	struct threshold_bank **bp = this_cpu_read(threshold_banks);
	unsigned int bank, cpu = smp_processor_id();

	/*
	 * Validate that the threshold bank has been initialized already. The
	 * handler is installed at boot time, but on a hotplug event the
	 * interrupt might fire before the data has been initialized.
	 */
	if (!bp)
		return;

	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
			continue;

		first_block = bp[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
		return -ENODEV;

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
		return -ENODEV;

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	/* CPU might be offline by now */
	if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi))
		return -ENODEV;

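	/* Subtract the (THRESHOLD_MAX - threshold_limit) preload to report the raw error count. */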
	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};
ATTRIBUTE_GROUPS(default);

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show = show,
	.store = store,
};

static void threshold_block_release(struct kobject *kobj);

static const struct kobj_type threshold_ktype = {
	.sysfs_ops	= &threshold_ops,
	.default_groups	= default_groups,
	.release	= threshold_block_release,
};

static const char *get_name(unsigned int cpu, unsigned int bank, struct threshold_block *b)
{
	enum smca_bank_types bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	bank_type = smca_get_bank_type(cpu, bank);
	if (bank_type >= N_SMCA_BANK_TYPES)
		return NULL;

	if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%u", smca_get_name(bank_type),
		 per_cpu(smca_banks, cpu)[bank].sysfs_id);
	return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
				     unsigned int bank, unsigned int block,
				     u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe(address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	/* This is safe as @tb is not visible yet */
	if (tb->blocks)
		list_add(&b->miscj, &tb->blocks->miscj);
	else
		tb->blocks = b;

	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
	if (err)
		goto out_free;
recurse:
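	/* Walk the remaining MISC blocks; the recursion ends when no further block address exists. */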
	address = get_block_address(address, low, high, bank, ++block, cpu);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return 0;

out_free:
	if (b) {
		list_del(&b->miscj);
		kobject_put(&b->kobj);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
				 unsigned int bank)
{
	struct device *dev = this_cpu_read(mce_device);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(cpu, bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(topology_die_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			bp[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	/* Associate the bank with the per-CPU MCE device */
	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	if (is_shared_bank(bank)) {
		b->shared = 1;
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
	if (err)
		goto out_kobj;

	bp[bank] = b;
	return 0;

out_kobj:
	kobject_put(b->kobj);
out_free:
	kfree(b);
out:
	return err;
}

static void threshold_block_release(struct kobject *kobj)
{
	kfree(to_block(kobj));
}

static void deallocate_threshold_blocks(struct threshold_bank *bank)
{
	struct threshold_block *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
		list_del(&pos->miscj);
		kobject_put(&pos->kobj);
	}

	kobject_put(&bank->blocks->kobj);
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_put(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_put(b->kobj);
}

static void threshold_remove_bank(struct threshold_bank *bank)
{
	struct amd_northbridge *nb;

	if (!bank->blocks)
		goto out_free;

	if (!bank->shared)
		goto out_dealloc;

	if (!refcount_dec_and_test(&bank->cpus)) {
		__threshold_remove_blocks(bank);
		return;
	} else {
		/*
		 * The last CPU on this node using the shared bank is going
		 * away, remove that bank now.
		 */
		nb = node_to_amd_nb(topology_die_id(smp_processor_id()));
		nb->bank4 = NULL;
	}

out_dealloc:
	deallocate_threshold_blocks(bank);

out_free:
	kobject_put(bank->kobj);
	kfree(bank);
}

static void __threshold_remove_device(struct threshold_bank **bp)
{
	unsigned int bank, numbanks = this_cpu_read(mce_num_banks);

	for (bank = 0; bank < numbanks; bank++) {
		if (!bp[bank])
			continue;

		threshold_remove_bank(bp[bank]);
		bp[bank] = NULL;
	}
	kfree(bp);
}

int mce_threshold_remove_device(unsigned int cpu)
{
	struct threshold_bank **bp = this_cpu_read(threshold_banks);

	if (!bp)
		return 0;

	/*
	 * Clear the pointer before cleaning up, so that the interrupt won't
	 * touch anything of this.
	 */
	this_cpu_write(threshold_banks, NULL);

	__threshold_remove_device(bp);
	return 0;
}

/**
 * mce_threshold_create_device - Create the per-CPU MCE threshold device
 * @cpu:	The plugged in CPU
 *
 * Create directories and files for all valid threshold banks.
 *
 * This is invoked from the CPU hotplug callback which was installed in
 * mcheck_init_device(). The invocation happens in context of the hotplug
 * thread running on @cpu. The callback is invoked on all CPUs which are
 * online when the callback is installed or during a real hotplug event.
 */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int numbanks, bank;
	struct threshold_bank **bp;
	int err;

	if (!mce_flags.amd_threshold)
		return 0;

	bp = this_cpu_read(threshold_banks);
	if (bp)
		return 0;

	numbanks = this_cpu_read(mce_num_banks);
	bp = kcalloc(numbanks, sizeof(*bp), GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	for (bank = 0; bank < numbanks; ++bank) {
		if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
			continue;
		err = threshold_create_bank(bp, cpu, bank);
		if (err) {
			__threshold_remove_device(bp);
			return err;
		}
	}
	this_cpu_write(threshold_banks, bp);

	if (thresholding_irq_en)
		mce_threshold_vector = amd_threshold_interrupt;
	return 0;
}