1 | /* |
2 | * Intel e752x Memory Controller kernel module |
3 | * (C) 2004 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. |
6 | * |
7 | * Implement support for the e7520, E7525, e7320 and i3100 memory controllers. |
8 | * |
9 | * Datasheets: |
10 | * https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html |
11 | * ftp://download.intel.com/design/intarch/datashts/31345803.pdf |
12 | * |
13 | * Written by Tom Zimmerman |
14 | * |
15 | * Contributors: |
16 | * Thayne Harbaugh at realmsys.com (?) |
17 | * Wang Zhenyu at intel.com |
18 | * Dave Jiang at mvista.com |
19 | * |
20 | */ |
21 | |
22 | #include <linux/module.h> |
23 | #include <linux/init.h> |
24 | #include <linux/pci.h> |
25 | #include <linux/pci_ids.h> |
26 | #include <linux/edac.h> |
27 | #include "edac_module.h" |
28 | |
29 | #define EDAC_MOD_STR "e752x_edac" |
30 | |
/* Module knobs (registered as module parameters further down the file). */
static int report_non_memory_errors;	/* nonzero: also log non-DRAM global errors (see do_global_error()) */
static int force_function_unhide;	/* NOTE(review): presumably unhides dev0 fn1 error device — confirm at probe */
static int sysbus_parity = -1;		/* -1 = leave chipset default; otherwise enable/disable — TODO confirm */

/* EDAC PCI control instance for this bridge */
static struct edac_pci_ctl_info *e752x_pci;
36 | |
37 | #define e752x_printk(level, fmt, arg...) \ |
38 | edac_printk(level, "e752x", fmt, ##arg) |
39 | |
40 | #define e752x_mc_printk(mci, level, fmt, arg...) \ |
41 | edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) |
42 | |
43 | #ifndef PCI_DEVICE_ID_INTEL_7520_0 |
44 | #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 |
45 | #endif /* PCI_DEVICE_ID_INTEL_7520_0 */ |
46 | |
47 | #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR |
48 | #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591 |
49 | #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */ |
50 | |
51 | #ifndef PCI_DEVICE_ID_INTEL_7525_0 |
52 | #define PCI_DEVICE_ID_INTEL_7525_0 0x359E |
53 | #endif /* PCI_DEVICE_ID_INTEL_7525_0 */ |
54 | |
55 | #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR |
56 | #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593 |
57 | #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */ |
58 | |
59 | #ifndef PCI_DEVICE_ID_INTEL_7320_0 |
60 | #define PCI_DEVICE_ID_INTEL_7320_0 0x3592 |
61 | #endif /* PCI_DEVICE_ID_INTEL_7320_0 */ |
62 | |
63 | #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR |
64 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 |
65 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ |
66 | |
67 | #ifndef PCI_DEVICE_ID_INTEL_3100_0 |
68 | #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0 |
69 | #endif /* PCI_DEVICE_ID_INTEL_3100_0 */ |
70 | |
71 | #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR |
72 | #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1 |
73 | #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */ |
74 | |
75 | #define E752X_NR_CSROWS 8 /* number of csrows */ |
76 | |
77 | /* E752X register addresses - device 0 function 0 */ |
78 | #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */ |
79 | /* |
80 | * 6:5 Scrub Completion Count |
81 | * 3:2 Scrub Rate (i3100 only) |
82 | * 01=fast 10=normal |
83 | * 1:0 Scrub Mode enable |
84 | * 00=off 10=on |
85 | */ |
86 | #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ |
87 | #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ |
88 | /* |
89 | * 31:30 Device width row 7 |
90 | * 01=x8 10=x4 11=x8 DDR2 |
91 | * 27:26 Device width row 6 |
92 | * 23:22 Device width row 5 |
 * 19:18 Device width row 4
94 | * 15:14 Device width row 3 |
95 | * 11:10 Device width row 2 |
96 | * 7:6 Device width row 1 |
97 | * 3:2 Device width row 0 |
98 | */ |
99 | #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */ |
100 | /* FIXME:IS THIS RIGHT? */ |
101 | /* |
102 | * 22 Number channels 0=1,1=2 |
103 | * 19:18 DRB Granularity 32/64MB |
104 | */ |
105 | #define E752X_DRM 0x80 /* Dimm mapping register */ |
106 | #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */ |
107 | /* |
108 | * 14:12 1 single A, 2 single B, 3 dual |
109 | */ |
110 | #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ |
111 | #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ |
112 | #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ |
113 | #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */ |
114 | |
115 | /* E752X register addresses - device 0 function 1 */ |
116 | #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */ |
117 | #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */ |
118 | #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */ |
119 | #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */ |
120 | #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */ |
121 | #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */ |
#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
126 | #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ |
127 | #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ |
128 | #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ |
129 | #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */ |
130 | #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ |
131 | #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ |
132 | #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ |
133 | #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */ |
134 | #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */ |
135 | #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */ |
136 | /* error address register (32b) */ |
137 | /* |
138 | * 31 Reserved |
 * 30:2  CE address (64 byte block 34:6)
140 | * 1 Reserved |
141 | * 0 HiLoCS |
142 | */ |
143 | #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */ |
144 | /* error address register (32b) */ |
145 | /* |
146 | * 31 Reserved |
147 | * 30:2 CE address (64 byte block 34:6) |
148 | * 1 Reserved |
149 | * 0 HiLoCS |
150 | */ |
151 | #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */ |
152 | /* error address register (32b) */ |
153 | /* |
154 | * 31 Reserved |
155 | * 30:2 CE address (64 byte block 34:6) |
156 | * 1 Reserved |
157 | * 0 HiLoCS |
158 | */ |
159 | #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */ |
160 | /* error address register (32b) */ |
161 | /* |
162 | * 31 Reserved |
 * 30:2  CE address (64 byte block 34:6)
164 | * 1 Reserved |
165 | * 0 HiLoCS |
166 | */ |
167 | #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */ |
168 | /* error syndrome register (16b) */ |
169 | #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */ |
170 | /* error syndrome register (16b) */ |
171 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ |
172 | |
173 | /* 3100 IMCH specific register addresses - device 0 function 1 */ |
174 | #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */ |
175 | #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */ |
176 | #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */ |
177 | #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */ |
178 | |
179 | /* ICH5R register addresses - device 30 function 0 */ |
180 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ |
181 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ |
182 | #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */ |
183 | |
/* Supported memory controller hubs; values index e752x_devs[]. */
enum e752x_chips {
	E7520 = 0,
	E7525 = 1,
	E7320 = 2,
	I3100 = 3
};
190 | |
191 | /* |
192 | * Those chips Support single-rank and dual-rank memories only. |
193 | * |
194 | * On e752x chips, the odd rows are present only on dual-rank memories. |
195 | * Dividing the rank by two will provide the dimm# |
196 | * |
197 | * i3100 MC has a different mapping: it supports only 4 ranks. |
198 | * |
199 | * The mapping is (from 1 to n): |
200 | * slot single-ranked double-ranked |
201 | * dimm #1 -> rank #4 NA |
202 | * dimm #2 -> rank #3 NA |
203 | * dimm #3 -> rank #2 Ranks 2 and 3 |
 *	dimm #4 -> rank #1	Ranks 1 and 4
 *
 * FIXME: The current mapping for i3100 considers that it supports up to 8
 * ranks/channel, but datasheet says that the MC supports only 4 ranks.
208 | */ |
209 | |
/* Driver-private state, hung off mem_ctl_info->pvt_info. */
struct e752x_pvt {
	struct pci_dev *dev_d0f0;	/* device 0 function 0 (memory controller) */
	struct pci_dev *dev_d0f1;	/* device 0 function 1 (error registers) */
	u32 tolm;			/* top of low memory, in pages (compared against page numbers in ctl_page_to_phys()) */
	u32 remapbase;			/* DRAM remap window base (pages) */
	u32 remaplimit;			/* DRAM remap window limit (pages) */
	int mc_symmetric;		/* nonzero: decode csrow from address bits instead of edac_mc_find_csrow_by_page() */
	u8 map[8];			/* csrow remap table consulted in do_process_ce() */
	int map_type;			/* NOTE(review): set elsewhere in the file — semantics not visible in this chunk */
	const struct e752x_dev_info *dev_info;	/* chip-specific PCI IDs and name */
};
221 | |
/* Per-chipset identification: PCI device IDs and a printable name. */
struct e752x_dev_info {
	u16 err_dev;		/* device ID of the dev0 fn1 error device */
	u16 ctl_dev;		/* device ID of the dev0 fn0 control device */
	const char *ctl_name;	/* human-readable controller name */
};
227 | |
/*
 * Snapshot of the chipset error registers, filled (and the latched
 * hardware bits cleared) by e752x_get_error_info().
 */
struct e752x_error_info {
	u32 ferr_global;	/* global first-error register */
	u32 nerr_global;	/* global next-error register */
	u32 nsi_ferr;	/* 3100 only */
	u32 nsi_nerr;	/* 3100 only */
	u8 hi_ferr;	/* all but 3100 */
	u8 hi_nerr;	/* all but 3100 */
	u16 sysbus_ferr;
	u16 sysbus_nerr;
	u8 buf_ferr;
	u8 buf_nerr;
	u16 dram_ferr;
	u16 dram_nerr;
	u32 dram_sec1_add;	/* first correctable-error address */
	u32 dram_sec2_add;	/* next correctable-error address */
	u16 dram_sec1_syndrome;
	u16 dram_sec2_syndrome;
	u32 dram_ded_add;	/* uncorrectable (read) error address */
	u32 dram_scrb_add;	/* uncorrectable scrub error address */
	u32 dram_retr_add;	/* retry address */
};
249 | |
/* Table of supported chipsets, indexed by enum e752x_chips. */
static const struct e752x_dev_info e752x_devs[] = {
	[E7520] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
		.ctl_name = "E7520" },
	[E7525] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
		.ctl_name = "E7525" },
	[E7320] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
		.ctl_name = "E7320" },
	[I3100] = {
		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
		.ctl_name = "3100" },
};
268 | |
269 | /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We |
270 | * map the scrubbing bandwidth to a hardware register value. The 'set' |
271 | * operation finds the 'matching or higher value'. Note that scrubbing |
272 | * on the e752x can only be enabled/disabled. The 3100 supports |
273 | * a normal and fast mode. |
274 | */ |
275 | |
276 | #define SDRATE_EOT 0xFFFFFFFF |
277 | |
/* One entry of a scrub-rate lookup table, terminated by SDRATE_EOT. */
struct scrubrate {
	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
	u16 scrubval;	/* register value for scrub rate */
};
282 | |
283 | /* Rate below assumes same performance as i3100 using PC3200 DDR2 in |
284 | * normal mode. e752x bridges don't support choosing normal or fast mode, |
285 | * so the scrubbing bandwidth value isn't all that important - scrubbing is |
286 | * either on or off. |
287 | */ |
/* e752x scrub table: scrubbing is only on/off, so two real entries. */
static const struct scrubrate scrubrates_e752x[] = {
	{0, 0x00},		/* Scrubbing Off */
	{500000, 0x02},		/* Scrubbing On */
	{SDRATE_EOT, 0x00}	/* End of Table */
};
293 | |
294 | /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s |
295 | * Normal mode: 125 (32000 / 256) times slower than fast mode. |
296 | */ |
/* i3100 scrub table: supports off, normal (32k clocks) and fast (256). */
static const struct scrubrate scrubrates_i3100[] = {
	{0, 0x00},		/* Scrubbing Off */
	{500000, 0x0a},		/* Normal mode - 32k clocks */
	{62500000, 0x06},	/* Fast mode - 256 clocks */
	{SDRATE_EOT, 0x00}	/* End of Table */
};
303 | |
304 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
305 | unsigned long page) |
306 | { |
307 | u32 remap; |
308 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
309 | |
310 | edac_dbg(3, "\n" ); |
311 | |
312 | if (page < pvt->tolm) |
313 | return page; |
314 | |
315 | if ((page >= 0x100000) && (page < pvt->remapbase)) |
316 | return page; |
317 | |
318 | remap = (page - pvt->tolm) + pvt->remapbase; |
319 | |
320 | if (remap < pvt->remaplimit) |
321 | return remap; |
322 | |
323 | e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n" , page); |
324 | return pvt->tolm - 1; |
325 | } |
326 | |
327 | static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, |
328 | u32 sec1_add, u16 sec1_syndrome) |
329 | { |
330 | u32 page; |
331 | int row; |
332 | int channel; |
333 | int i; |
334 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
335 | |
336 | edac_dbg(3, "\n" ); |
337 | |
338 | /* convert the addr to 4k page */ |
339 | page = sec1_add >> (PAGE_SHIFT - 4); |
340 | |
341 | /* FIXME - check for -1 */ |
342 | if (pvt->mc_symmetric) { |
343 | /* chip select are bits 14 & 13 */ |
344 | row = ((page >> 1) & 3); |
345 | e752x_printk(KERN_WARNING, |
346 | "Test row %d Table %d %d %d %d %d %d %d %d\n" , row, |
347 | pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], |
348 | pvt->map[4], pvt->map[5], pvt->map[6], |
349 | pvt->map[7]); |
350 | |
351 | /* test for channel remapping */ |
352 | for (i = 0; i < 8; i++) { |
353 | if (pvt->map[i] == row) |
354 | break; |
355 | } |
356 | |
357 | e752x_printk(KERN_WARNING, "Test computed row %d\n" , i); |
358 | |
359 | if (i < 8) |
360 | row = i; |
361 | else |
362 | e752x_mc_printk(mci, KERN_WARNING, |
363 | "row %d not found in remap table\n" , |
364 | row); |
365 | } else |
366 | row = edac_mc_find_csrow_by_page(mci, page); |
367 | |
368 | /* 0 = channel A, 1 = channel B */ |
369 | channel = !(error_one & 1); |
370 | |
371 | /* e752x mc reads 34:6 of the DRAM linear address */ |
372 | edac_mc_handle_error(type: HW_EVENT_ERR_CORRECTED, mci, error_count: 1, |
373 | page_frame_number: page, offset_in_page(sec1_add << 4), syndrome: sec1_syndrome, |
374 | top_layer: row, mid_layer: channel, low_layer: -1, |
375 | msg: "e752x CE" , other_detail: "" ); |
376 | } |
377 | |
378 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, |
379 | u32 sec1_add, u16 sec1_syndrome, int *error_found, |
380 | int handle_error) |
381 | { |
382 | *error_found = 1; |
383 | |
384 | if (handle_error) |
385 | do_process_ce(mci, error_one, sec1_add, sec1_syndrome); |
386 | } |
387 | |
388 | static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, |
389 | u32 ded_add, u32 scrb_add) |
390 | { |
391 | u32 error_2b, block_page; |
392 | int row; |
393 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
394 | |
395 | edac_dbg(3, "\n" ); |
396 | |
397 | if (error_one & 0x0202) { |
398 | error_2b = ded_add; |
399 | |
400 | /* convert to 4k address */ |
401 | block_page = error_2b >> (PAGE_SHIFT - 4); |
402 | |
403 | row = pvt->mc_symmetric ? |
404 | /* chip select are bits 14 & 13 */ |
405 | ((block_page >> 1) & 3) : |
406 | edac_mc_find_csrow_by_page(mci, page: block_page); |
407 | |
408 | /* e752x mc reads 34:6 of the DRAM linear address */ |
409 | edac_mc_handle_error(type: HW_EVENT_ERR_UNCORRECTED, mci, error_count: 1, |
410 | page_frame_number: block_page, |
411 | offset_in_page(error_2b << 4), syndrome: 0, |
412 | top_layer: row, mid_layer: -1, low_layer: -1, |
413 | msg: "e752x UE from Read" , other_detail: "" ); |
414 | |
415 | } |
416 | if (error_one & 0x0404) { |
417 | error_2b = scrb_add; |
418 | |
419 | /* convert to 4k address */ |
420 | block_page = error_2b >> (PAGE_SHIFT - 4); |
421 | |
422 | row = pvt->mc_symmetric ? |
423 | /* chip select are bits 14 & 13 */ |
424 | ((block_page >> 1) & 3) : |
425 | edac_mc_find_csrow_by_page(mci, page: block_page); |
426 | |
427 | /* e752x mc reads 34:6 of the DRAM linear address */ |
428 | edac_mc_handle_error(type: HW_EVENT_ERR_UNCORRECTED, mci, error_count: 1, |
429 | page_frame_number: block_page, |
430 | offset_in_page(error_2b << 4), syndrome: 0, |
431 | top_layer: row, mid_layer: -1, low_layer: -1, |
432 | msg: "e752x UE from Scruber" , other_detail: "" ); |
433 | } |
434 | } |
435 | |
436 | static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, |
437 | u32 ded_add, u32 scrb_add, int *error_found, |
438 | int handle_error) |
439 | { |
440 | *error_found = 1; |
441 | |
442 | if (handle_error) |
443 | do_process_ue(mci, error_one, ded_add, scrb_add); |
444 | } |
445 | |
446 | static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, |
447 | int *error_found, int handle_error) |
448 | { |
449 | *error_found = 1; |
450 | |
451 | if (!handle_error) |
452 | return; |
453 | |
454 | edac_dbg(3, "\n" ); |
455 | edac_mc_handle_error(type: HW_EVENT_ERR_UNCORRECTED, mci, error_count: 1, page_frame_number: 0, offset_in_page: 0, syndrome: 0, |
456 | top_layer: -1, mid_layer: -1, low_layer: -1, |
457 | msg: "e752x UE log memory write" , other_detail: "" ); |
458 | } |
459 | |
460 | static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, |
461 | u32 retry_add) |
462 | { |
463 | u32 error_1b, page; |
464 | int row; |
465 | struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; |
466 | |
467 | error_1b = retry_add; |
468 | page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ |
469 | |
470 | /* chip select are bits 14 & 13 */ |
471 | row = pvt->mc_symmetric ? ((page >> 1) & 3) : |
472 | edac_mc_find_csrow_by_page(mci, page); |
473 | |
474 | e752x_mc_printk(mci, KERN_WARNING, |
475 | "CE page 0x%lx, row %d : Memory read retry\n" , |
476 | (long unsigned int)page, row); |
477 | } |
478 | |
479 | static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, |
480 | u32 retry_add, int *error_found, |
481 | int handle_error) |
482 | { |
483 | *error_found = 1; |
484 | |
485 | if (handle_error) |
486 | do_process_ded_retry(mci, error, retry_add); |
487 | } |
488 | |
489 | static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, |
490 | int *error_found, int handle_error) |
491 | { |
492 | *error_found = 1; |
493 | |
494 | if (handle_error) |
495 | e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n" ); |
496 | } |
497 | |
/*
 * Names for bits 0-10 of the global FERR/NERR error registers; the bit
 * index is the array index (see do_global_error()).
 */
static char *global_message[11] = {
	"PCI Express C1",
	"PCI Express C",
	"PCI Express B1",
	"PCI Express B",
	"PCI Express A1",
	"PCI Express A",
	"DMA Controller",
	"HUB or NS Interface",
	"System Bus",
	"DRAM Controller",	/* index DRAM_ENTRY */
	"Internal Buffer"
};

/* Index of "DRAM Controller" in global_message[] */
#define DRAM_ENTRY	9

/* Severity prefix used by every reporter below; indexed by 'fatal' flag */
static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
515 | |
516 | static void do_global_error(int fatal, u32 errors) |
517 | { |
518 | int i; |
519 | |
520 | for (i = 0; i < 11; i++) { |
521 | if (errors & (1 << i)) { |
522 | /* If the error is from DRAM Controller OR |
523 | * we are to report ALL errors, then |
524 | * report the error |
525 | */ |
526 | if ((i == DRAM_ENTRY) || report_non_memory_errors) |
527 | e752x_printk(KERN_WARNING, "%sError %s\n" , |
528 | fatal_message[fatal], |
529 | global_message[i]); |
530 | } |
531 | } |
532 | } |
533 | |
534 | static inline void global_error(int fatal, u32 errors, int *error_found, |
535 | int handle_error) |
536 | { |
537 | *error_found = 1; |
538 | |
539 | if (handle_error) |
540 | do_global_error(fatal, errors); |
541 | } |
542 | |
/* Names for bits 0-6 of the hub interface error registers. */
static char *hub_message[7] = {
	"HI Address or Command Parity", "HI Illegal Access",
	"HI Internal Parity", "Out of Range Access",
	"HI Data Parity", "Enhanced Config Access",
	"Hub Interface Target Abort"
};
549 | |
550 | static void do_hub_error(int fatal, u8 errors) |
551 | { |
552 | int i; |
553 | |
554 | for (i = 0; i < 7; i++) { |
555 | if (errors & (1 << i)) |
556 | e752x_printk(KERN_WARNING, "%sError %s\n" , |
557 | fatal_message[fatal], hub_message[i]); |
558 | } |
559 | } |
560 | |
561 | static inline void hub_error(int fatal, u8 errors, int *error_found, |
562 | int handle_error) |
563 | { |
564 | *error_found = 1; |
565 | |
566 | if (handle_error) |
567 | do_hub_error(fatal, errors); |
568 | } |
569 | |
/* NSI (i3100 north/south interface) error classification masks */
#define NSI_FATAL_MASK		0x0c080081
#define NSI_NON_FATAL_MASK	0x23a0ba64
#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)

/* Names for NSI_FERR/NSI_NERR bits 0-29; "" entries are reserved bits. */
static char *nsi_message[30] = {
	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
	"",				/* reserved */
	"NSI Parity Error",		/* bit 2, non-fatal */
	"",				/* reserved */
	"",				/* reserved */
	"Correctable Error Message",	/* bit 5, non-fatal */
	"Non-Fatal Error Message",	/* bit 6, non-fatal */
	"Fatal Error Message",		/* bit 7, fatal */
	"",				/* reserved */
	"Receiver Error",		/* bit 9, non-fatal */
	"",				/* reserved */
	"Bad TLP",			/* bit 11, non-fatal */
	"Bad DLLP",			/* bit 12, non-fatal */
	"REPLAY_NUM Rollover",		/* bit 13, non-fatal */
	"",				/* reserved */
	"Replay Timer Timeout",		/* bit 15, non-fatal */
	"",				/* reserved */
	"",				/* reserved */
	"",				/* reserved */
	"Data Link Protocol Error",	/* bit 19, fatal */
	"",				/* reserved */
	"Poisoned TLP",			/* bit 21, non-fatal */
	"",				/* reserved */
	"Completion Timeout",		/* bit 23, non-fatal */
	"Completer Abort",		/* bit 24, non-fatal */
	"Unexpected Completion",	/* bit 25, non-fatal */
	"Receiver Overflow",		/* bit 26, fatal */
	"Malformed TLP",		/* bit 27, fatal */
	"",				/* reserved */
	"Unsupported Request"		/* bit 29, non-fatal */
};
606 | |
607 | static void do_nsi_error(int fatal, u32 errors) |
608 | { |
609 | int i; |
610 | |
611 | for (i = 0; i < 30; i++) { |
612 | if (errors & (1 << i)) |
613 | printk(KERN_WARNING "%sError %s\n" , |
614 | fatal_message[fatal], nsi_message[i]); |
615 | } |
616 | } |
617 | |
618 | static inline void nsi_error(int fatal, u32 errors, int *error_found, |
619 | int handle_error) |
620 | { |
621 | *error_found = 1; |
622 | |
623 | if (handle_error) |
624 | do_nsi_error(fatal, errors); |
625 | } |
626 | |
/* Names for bits 0-3 of the memory buffer error registers. */
static char *membuf_message[4] = {
	"Internal PMWB to DRAM parity",
	"Internal PMWB to System Bus Parity",
	"Internal System Bus or IO to PMWB Parity",
	"Internal DRAM to PMWB Parity"
};
633 | |
634 | static void do_membuf_error(u8 errors) |
635 | { |
636 | int i; |
637 | |
638 | for (i = 0; i < 4; i++) { |
639 | if (errors & (1 << i)) |
640 | e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n" , |
641 | membuf_message[i]); |
642 | } |
643 | } |
644 | |
645 | static inline void membuf_error(u8 errors, int *error_found, int handle_error) |
646 | { |
647 | *error_found = 1; |
648 | |
649 | if (handle_error) |
650 | do_membuf_error(errors); |
651 | } |
652 | |
/* Names for bits 0-9 of the system bus error registers. */
static char *sysbus_message[10] = {
	"Addr or Request Parity",
	"Data Strobe Glitch",
	"Addr Strobe Glitch",
	"Data Parity",
	"Addr Above TOM",
	"Non DRAM Lock Error",
	"MCERR", "BINIT",
	"Memory Parity",
	"IO Subsystem Parity"
};
664 | |
665 | static void do_sysbus_error(int fatal, u32 errors) |
666 | { |
667 | int i; |
668 | |
669 | for (i = 0; i < 10; i++) { |
670 | if (errors & (1 << i)) |
671 | e752x_printk(KERN_WARNING, "%sError System Bus %s\n" , |
672 | fatal_message[fatal], sysbus_message[i]); |
673 | } |
674 | } |
675 | |
676 | static inline void sysbus_error(int fatal, u32 errors, int *error_found, |
677 | int handle_error) |
678 | { |
679 | *error_found = 1; |
680 | |
681 | if (handle_error) |
682 | do_sysbus_error(fatal, errors); |
683 | } |
684 | |
685 | static void e752x_check_hub_interface(struct e752x_error_info *info, |
686 | int *error_found, int handle_error) |
687 | { |
688 | u8 stat8; |
689 | |
690 | //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); |
691 | |
692 | stat8 = info->hi_ferr; |
693 | |
694 | if (stat8 & 0x7f) { /* Error, so process */ |
695 | stat8 &= 0x7f; |
696 | |
697 | if (stat8 & 0x2b) |
698 | hub_error(fatal: 1, errors: stat8 & 0x2b, error_found, handle_error); |
699 | |
700 | if (stat8 & 0x54) |
701 | hub_error(fatal: 0, errors: stat8 & 0x54, error_found, handle_error); |
702 | } |
703 | //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); |
704 | |
705 | stat8 = info->hi_nerr; |
706 | |
707 | if (stat8 & 0x7f) { /* Error, so process */ |
708 | stat8 &= 0x7f; |
709 | |
710 | if (stat8 & 0x2b) |
711 | hub_error(fatal: 1, errors: stat8 & 0x2b, error_found, handle_error); |
712 | |
713 | if (stat8 & 0x54) |
714 | hub_error(fatal: 0, errors: stat8 & 0x54, error_found, handle_error); |
715 | } |
716 | } |
717 | |
718 | static void e752x_check_ns_interface(struct e752x_error_info *info, |
719 | int *error_found, int handle_error) |
720 | { |
721 | u32 stat32; |
722 | |
723 | stat32 = info->nsi_ferr; |
724 | if (stat32 & NSI_ERR_MASK) { /* Error, so process */ |
725 | if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */ |
726 | nsi_error(fatal: 1, errors: stat32 & NSI_FATAL_MASK, error_found, |
727 | handle_error); |
728 | if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */ |
729 | nsi_error(fatal: 0, errors: stat32 & NSI_NON_FATAL_MASK, error_found, |
730 | handle_error); |
731 | } |
732 | stat32 = info->nsi_nerr; |
733 | if (stat32 & NSI_ERR_MASK) { |
734 | if (stat32 & NSI_FATAL_MASK) |
735 | nsi_error(fatal: 1, errors: stat32 & NSI_FATAL_MASK, error_found, |
736 | handle_error); |
737 | if (stat32 & NSI_NON_FATAL_MASK) |
738 | nsi_error(fatal: 0, errors: stat32 & NSI_NON_FATAL_MASK, error_found, |
739 | handle_error); |
740 | } |
741 | } |
742 | |
743 | static void e752x_check_sysbus(struct e752x_error_info *info, |
744 | int *error_found, int handle_error) |
745 | { |
746 | u32 stat32, error32; |
747 | |
748 | //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32); |
749 | stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); |
750 | |
751 | if (stat32 == 0) |
752 | return; /* no errors */ |
753 | |
754 | error32 = (stat32 >> 16) & 0x3ff; |
755 | stat32 = stat32 & 0x3ff; |
756 | |
757 | if (stat32 & 0x087) |
758 | sysbus_error(fatal: 1, errors: stat32 & 0x087, error_found, handle_error); |
759 | |
760 | if (stat32 & 0x378) |
761 | sysbus_error(fatal: 0, errors: stat32 & 0x378, error_found, handle_error); |
762 | |
763 | if (error32 & 0x087) |
764 | sysbus_error(fatal: 1, errors: error32 & 0x087, error_found, handle_error); |
765 | |
766 | if (error32 & 0x378) |
767 | sysbus_error(fatal: 0, errors: error32 & 0x378, error_found, handle_error); |
768 | } |
769 | |
770 | static void e752x_check_membuf(struct e752x_error_info *info, |
771 | int *error_found, int handle_error) |
772 | { |
773 | u8 stat8; |
774 | |
775 | stat8 = info->buf_ferr; |
776 | |
777 | if (stat8 & 0x0f) { /* Error, so process */ |
778 | stat8 &= 0x0f; |
779 | membuf_error(errors: stat8, error_found, handle_error); |
780 | } |
781 | |
782 | stat8 = info->buf_nerr; |
783 | |
784 | if (stat8 & 0x0f) { /* Error, so process */ |
785 | stat8 &= 0x0f; |
786 | membuf_error(errors: stat8, error_found, handle_error); |
787 | } |
788 | } |
789 | |
790 | static void e752x_check_dram(struct mem_ctl_info *mci, |
791 | struct e752x_error_info *info, int *error_found, |
792 | int handle_error) |
793 | { |
794 | u16 error_one, error_next; |
795 | |
796 | error_one = info->dram_ferr; |
797 | error_next = info->dram_nerr; |
798 | |
799 | /* decode and report errors */ |
800 | if (error_one & 0x0101) /* check first error correctable */ |
801 | process_ce(mci, error_one, sec1_add: info->dram_sec1_add, |
802 | sec1_syndrome: info->dram_sec1_syndrome, error_found, handle_error); |
803 | |
804 | if (error_next & 0x0101) /* check next error correctable */ |
805 | process_ce(mci, error_one: error_next, sec1_add: info->dram_sec2_add, |
806 | sec1_syndrome: info->dram_sec2_syndrome, error_found, handle_error); |
807 | |
808 | if (error_one & 0x4040) |
809 | process_ue_no_info_wr(mci, error_found, handle_error); |
810 | |
811 | if (error_next & 0x4040) |
812 | process_ue_no_info_wr(mci, error_found, handle_error); |
813 | |
814 | if (error_one & 0x2020) |
815 | process_ded_retry(mci, error: error_one, retry_add: info->dram_retr_add, |
816 | error_found, handle_error); |
817 | |
818 | if (error_next & 0x2020) |
819 | process_ded_retry(mci, error: error_next, retry_add: info->dram_retr_add, |
820 | error_found, handle_error); |
821 | |
822 | if (error_one & 0x0808) |
823 | process_threshold_ce(mci, error: error_one, error_found, handle_error); |
824 | |
825 | if (error_next & 0x0808) |
826 | process_threshold_ce(mci, error: error_next, error_found, |
827 | handle_error); |
828 | |
829 | if (error_one & 0x0606) |
830 | process_ue(mci, error_one, ded_add: info->dram_ded_add, |
831 | scrb_add: info->dram_scrb_add, error_found, handle_error); |
832 | |
833 | if (error_next & 0x0606) |
834 | process_ue(mci, error_one: error_next, ded_add: info->dram_ded_add, |
835 | scrb_add: info->dram_scrb_add, error_found, handle_error); |
836 | } |
837 | |
838 | static void e752x_get_error_info(struct mem_ctl_info *mci, |
839 | struct e752x_error_info *info) |
840 | { |
841 | struct pci_dev *dev; |
842 | struct e752x_pvt *pvt; |
843 | |
844 | memset(info, 0, sizeof(*info)); |
845 | pvt = (struct e752x_pvt *)mci->pvt_info; |
846 | dev = pvt->dev_d0f1; |
847 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, val: &info->ferr_global); |
848 | |
849 | if (info->ferr_global) { |
850 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
851 | pci_read_config_dword(dev, I3100_NSI_FERR, |
852 | val: &info->nsi_ferr); |
853 | info->hi_ferr = 0; |
854 | } else { |
855 | pci_read_config_byte(dev, E752X_HI_FERR, |
856 | val: &info->hi_ferr); |
857 | info->nsi_ferr = 0; |
858 | } |
859 | pci_read_config_word(dev, E752X_SYSBUS_FERR, |
860 | val: &info->sysbus_ferr); |
861 | pci_read_config_byte(dev, E752X_BUF_FERR, val: &info->buf_ferr); |
862 | pci_read_config_word(dev, E752X_DRAM_FERR, val: &info->dram_ferr); |
863 | pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, |
864 | val: &info->dram_sec1_add); |
865 | pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, |
866 | val: &info->dram_sec1_syndrome); |
867 | pci_read_config_dword(dev, E752X_DRAM_DED_ADD, |
868 | val: &info->dram_ded_add); |
869 | pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD, |
870 | val: &info->dram_scrb_add); |
871 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, |
872 | val: &info->dram_retr_add); |
873 | |
874 | /* ignore the reserved bits just in case */ |
875 | if (info->hi_ferr & 0x7f) |
876 | pci_write_config_byte(dev, E752X_HI_FERR, |
877 | val: info->hi_ferr); |
878 | |
879 | if (info->nsi_ferr & NSI_ERR_MASK) |
880 | pci_write_config_dword(dev, I3100_NSI_FERR, |
881 | val: info->nsi_ferr); |
882 | |
883 | if (info->sysbus_ferr) |
884 | pci_write_config_word(dev, E752X_SYSBUS_FERR, |
885 | val: info->sysbus_ferr); |
886 | |
887 | if (info->buf_ferr & 0x0f) |
888 | pci_write_config_byte(dev, E752X_BUF_FERR, |
889 | val: info->buf_ferr); |
890 | |
891 | if (info->dram_ferr) |
892 | pci_write_bits16(pdev: pvt->dev_d0f1, E752X_DRAM_FERR, |
893 | value: info->dram_ferr, mask: info->dram_ferr); |
894 | |
895 | pci_write_config_dword(dev, E752X_FERR_GLOBAL, |
896 | val: info->ferr_global); |
897 | } |
898 | |
899 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, val: &info->nerr_global); |
900 | |
901 | if (info->nerr_global) { |
902 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
903 | pci_read_config_dword(dev, I3100_NSI_NERR, |
904 | val: &info->nsi_nerr); |
905 | info->hi_nerr = 0; |
906 | } else { |
907 | pci_read_config_byte(dev, E752X_HI_NERR, |
908 | val: &info->hi_nerr); |
909 | info->nsi_nerr = 0; |
910 | } |
911 | pci_read_config_word(dev, E752X_SYSBUS_NERR, |
912 | val: &info->sysbus_nerr); |
913 | pci_read_config_byte(dev, E752X_BUF_NERR, val: &info->buf_nerr); |
914 | pci_read_config_word(dev, E752X_DRAM_NERR, val: &info->dram_nerr); |
915 | pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, |
916 | val: &info->dram_sec2_add); |
917 | pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, |
918 | val: &info->dram_sec2_syndrome); |
919 | |
920 | if (info->hi_nerr & 0x7f) |
921 | pci_write_config_byte(dev, E752X_HI_NERR, |
922 | val: info->hi_nerr); |
923 | |
924 | if (info->nsi_nerr & NSI_ERR_MASK) |
925 | pci_write_config_dword(dev, I3100_NSI_NERR, |
926 | val: info->nsi_nerr); |
927 | |
928 | if (info->sysbus_nerr) |
929 | pci_write_config_word(dev, E752X_SYSBUS_NERR, |
930 | val: info->sysbus_nerr); |
931 | |
932 | if (info->buf_nerr & 0x0f) |
933 | pci_write_config_byte(dev, E752X_BUF_NERR, |
934 | val: info->buf_nerr); |
935 | |
936 | if (info->dram_nerr) |
937 | pci_write_bits16(pdev: pvt->dev_d0f1, E752X_DRAM_NERR, |
938 | value: info->dram_nerr, mask: info->dram_nerr); |
939 | |
940 | pci_write_config_dword(dev, E752X_NERR_GLOBAL, |
941 | val: info->nerr_global); |
942 | } |
943 | } |
944 | |
945 | static int e752x_process_error_info(struct mem_ctl_info *mci, |
946 | struct e752x_error_info *info, |
947 | int handle_errors) |
948 | { |
949 | u32 error32, stat32; |
950 | int error_found; |
951 | |
952 | error_found = 0; |
953 | error32 = (info->ferr_global >> 18) & 0x3ff; |
954 | stat32 = (info->ferr_global >> 4) & 0x7ff; |
955 | |
956 | if (error32) |
957 | global_error(fatal: 1, errors: error32, error_found: &error_found, handle_error: handle_errors); |
958 | |
959 | if (stat32) |
960 | global_error(fatal: 0, errors: stat32, error_found: &error_found, handle_error: handle_errors); |
961 | |
962 | error32 = (info->nerr_global >> 18) & 0x3ff; |
963 | stat32 = (info->nerr_global >> 4) & 0x7ff; |
964 | |
965 | if (error32) |
966 | global_error(fatal: 1, errors: error32, error_found: &error_found, handle_error: handle_errors); |
967 | |
968 | if (stat32) |
969 | global_error(fatal: 0, errors: stat32, error_found: &error_found, handle_error: handle_errors); |
970 | |
971 | e752x_check_hub_interface(info, error_found: &error_found, handle_error: handle_errors); |
972 | e752x_check_ns_interface(info, error_found: &error_found, handle_error: handle_errors); |
973 | e752x_check_sysbus(info, error_found: &error_found, handle_error: handle_errors); |
974 | e752x_check_membuf(info, error_found: &error_found, handle_error: handle_errors); |
975 | e752x_check_dram(mci, info, error_found: &error_found, handle_error: handle_errors); |
976 | return error_found; |
977 | } |
978 | |
979 | static void e752x_check(struct mem_ctl_info *mci) |
980 | { |
981 | struct e752x_error_info info; |
982 | |
983 | e752x_get_error_info(mci, info: &info); |
984 | e752x_process_error_info(mci, info: &info, handle_errors: 1); |
985 | } |
986 | |
987 | /* Program byte/sec bandwidth scrub rate to hardware */ |
988 | static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) |
989 | { |
990 | const struct scrubrate *scrubrates; |
991 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
992 | struct pci_dev *pdev = pvt->dev_d0f0; |
993 | int i; |
994 | |
995 | if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) |
996 | scrubrates = scrubrates_i3100; |
997 | else |
998 | scrubrates = scrubrates_e752x; |
999 | |
1000 | /* Translate the desired scrub rate to a e752x/3100 register value. |
1001 | * Search for the bandwidth that is equal or greater than the |
1002 | * desired rate and program the cooresponding register value. |
1003 | */ |
1004 | for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) |
1005 | if (scrubrates[i].bandwidth >= new_bw) |
1006 | break; |
1007 | |
1008 | if (scrubrates[i].bandwidth == SDRATE_EOT) |
1009 | return -1; |
1010 | |
1011 | pci_write_config_word(dev: pdev, E752X_MCHSCRB, val: scrubrates[i].scrubval); |
1012 | |
1013 | return scrubrates[i].bandwidth; |
1014 | } |
1015 | |
1016 | /* Convert current scrub rate value into byte/sec bandwidth */ |
1017 | static int get_sdram_scrub_rate(struct mem_ctl_info *mci) |
1018 | { |
1019 | const struct scrubrate *scrubrates; |
1020 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
1021 | struct pci_dev *pdev = pvt->dev_d0f0; |
1022 | u16 scrubval; |
1023 | int i; |
1024 | |
1025 | if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) |
1026 | scrubrates = scrubrates_i3100; |
1027 | else |
1028 | scrubrates = scrubrates_e752x; |
1029 | |
1030 | /* Find the bandwidth matching the memory scrubber configuration */ |
1031 | pci_read_config_word(dev: pdev, E752X_MCHSCRB, val: &scrubval); |
1032 | scrubval = scrubval & 0x0f; |
1033 | |
1034 | for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) |
1035 | if (scrubrates[i].scrubval == scrubval) |
1036 | break; |
1037 | |
1038 | if (scrubrates[i].bandwidth == SDRATE_EOT) { |
1039 | e752x_printk(KERN_WARNING, |
1040 | "Invalid sdram scrub control value: 0x%x\n" , scrubval); |
1041 | return -1; |
1042 | } |
1043 | return scrubrates[i].bandwidth; |
1044 | |
1045 | } |
1046 | |
1047 | /* Return 1 if dual channel mode is active. Else return 0. */ |
1048 | static inline int dual_channel_active(u16 ddrcsr) |
1049 | { |
1050 | return (((ddrcsr >> 12) & 3) == 3); |
1051 | } |
1052 | |
1053 | /* Remap csrow index numbers if map_type is "reverse" |
1054 | */ |
1055 | static inline int remap_csrow_index(struct mem_ctl_info *mci, int index) |
1056 | { |
1057 | struct e752x_pvt *pvt = mci->pvt_info; |
1058 | |
1059 | if (!pvt->map_type) |
1060 | return (7 - index); |
1061 | |
1062 | return (index); |
1063 | } |
1064 | |
/* Populate the EDAC csrow/dimm structures from the chipset's DRA (row
 * attribute) and DRB (row boundary) registers.  Called once at probe.
 */
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	enum edac_type edac_mode;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size, i, nr_pages;

	/* Assemble the four DRA bytes into a single 32-bit value. */
	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(dev: pdev, E752X_DRA + index, val: &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(dev: pdev, E752X_DRC, val: &drc);
	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation). DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		/* Honour reversed row mapping if configured. */
		csrow = mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(dev: pdev, E752X_DRB + index, val: &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		edac_dbg(3, "(%d) cumul_size 0x%x\n" , index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			edac_mode = EDAC_NONE;
		/* Split the row's pages evenly across its channel dimms. */
		for (i = 0; i < csrow->nr_channels; i++) {
			struct dimm_info *dimm = csrow->channels[i]->dimm;

			edac_dbg(3, "Initializing rank at (%i,%i)\n" , index, i);
			dimm->nr_pages = nr_pages / csrow->nr_channels;
			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
			dimm->mtype = MEM_RDDR;	/* only one type supported */
			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
			dimm->edac_mode = edac_mode;
		}
	}
}
1137 | |
/* Build pvt->map: for each of the 8 DRB slots record the csrow number
 * backing that slot, or 0xff when the slot is unpopulated.  DRB values
 * are cumulative, so a register equal to the previous boundary means
 * that side holds no memory.
 */
static void e752x_init_mem_map_table(struct pci_dev *pdev,
				struct e752x_pvt *pvt)
{
	int index;
	u8 value, last, row;

	last = 0;
	row = 0;

	/* Walk the DRB registers two at a time: each DIMM occupies a
	 * pair of slots (one per side).
	 */
	for (index = 0; index < 8; index += 2) {
		pci_read_config_byte(dev: pdev, E752X_DRB + index, val: &value);
		/* test if there is a dimm in this slot */
		if (value == last) {
			/* no dimm in the slot, so flag it as empty */
			pvt->map[index] = 0xff;
			pvt->map[index + 1] = 0xff;
		} else {	/* there is a dimm in the slot */
			pvt->map[index] = row;
			row++;
			last = value;
			/* test the next value to see if the dimm is double
			 * sided
			 */
			pci_read_config_byte(dev: pdev, E752X_DRB + index + 1,
					val: &value);

			/* Second side: 0xff if the boundary did not advance
			 * (single-sided DIMM), otherwise the next csrow.
			 * Note row is bumped either way so the numbering
			 * stays aligned with the slot pair.
			 */
			pvt->map[index + 1] = (value == last) ? 0xff : row;
			row++;
			last = value;
		}
	}
}
1172 | |
/* Return 0 on success or 1 on failure.
 * On success the caller owns references to both pvt->dev_d0f1 and
 * pvt->dev_d0f0 and must pci_dev_put() them on teardown.
 */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
			struct e752x_pvt *pvt)
{
	/* Grab a reference to dev 0 func 1, the error-reporting function. */
	pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
				device: pvt->dev_info->err_dev, NULL);

	if (pvt->dev_d0f1 == NULL) {
		/* The BIOS may have hidden the function; rescan the bus
		 * and take our own reference on whatever shows up.
		 * (pci_dev_get() tolerates NULL.)
		 */
		pvt->dev_d0f1 = pci_scan_single_device(bus: pdev->bus,
						PCI_DEVFN(0, 1));
		pci_dev_get(dev: pvt->dev_d0f1);
	}

	if (pvt->dev_d0f1 == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n" ,
			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		return 1;
	}

	/* Dev 0 func 0 is the memory controller proper. */
	pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
				device: e752x_devs[dev_idx].ctl_dev,
				NULL);

	if (pvt->dev_d0f0 == NULL)
		goto fail;

	return 0;

fail:
	/* Drop the d0f1 reference taken above. */
	pci_dev_put(dev: pvt->dev_d0f1);
	return 1;
}
1206 | |
1207 | /* Setup system bus parity mask register. |
1208 | * Sysbus parity supported on: |
1209 | * e7320/e7520/e7525 + Xeon |
1210 | */ |
1211 | static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) |
1212 | { |
1213 | char *cpu_id = cpu_data(0).x86_model_id; |
1214 | struct pci_dev *dev = pvt->dev_d0f1; |
1215 | int enable = 1; |
1216 | |
1217 | /* Allow module parameter override, else see if CPU supports parity */ |
1218 | if (sysbus_parity != -1) { |
1219 | enable = sysbus_parity; |
1220 | } else if (cpu_id[0] && !strstr(cpu_id, "Xeon" )) { |
1221 | e752x_printk(KERN_INFO, "System Bus Parity not " |
1222 | "supported by CPU, disabling\n" ); |
1223 | enable = 0; |
1224 | } |
1225 | |
1226 | if (enable) |
1227 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, val: 0x0000); |
1228 | else |
1229 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, val: 0x0309); |
1230 | } |
1231 | |
1232 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) |
1233 | { |
1234 | struct pci_dev *dev; |
1235 | |
1236 | dev = pvt->dev_d0f1; |
1237 | /* Turn off error disable & SMI in case the BIOS turned it on */ |
1238 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
1239 | pci_write_config_dword(dev, I3100_NSI_EMASK, val: 0); |
1240 | pci_write_config_dword(dev, I3100_NSI_SMICMD, val: 0); |
1241 | } else { |
1242 | pci_write_config_byte(dev, E752X_HI_ERRMASK, val: 0x00); |
1243 | pci_write_config_byte(dev, E752X_HI_SMICMD, val: 0x00); |
1244 | } |
1245 | |
1246 | e752x_init_sysbus_parity_mask(pvt); |
1247 | |
1248 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, val: 0x00); |
1249 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, val: 0x00); |
1250 | pci_write_config_byte(dev, E752X_BUF_SMICMD, val: 0x00); |
1251 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, val: 0x00); |
1252 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, val: 0x00); |
1253 | } |
1254 | |
/* Second-stage probe: set up one e752x/i3100 memory controller instance.
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	edac_dbg(0, "mci\n" );
	edac_dbg(0, "Starting Probe1\n" );

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(dev: pdev, E752X_DEVPRES1, val: &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n" );
		return -ENODEV;
	}
	/* Un-hide dev0:fun1 so the error registers are reachable. */
	stat8 |= (1 << 5);
	pci_write_config_byte(dev: pdev, E752X_DEVPRES1, val: stat8);

	pci_read_config_word(dev: pdev, E752X_DDRCSR, val: &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	/* Layer 0: chip-select rows; layer 1: channels per row. */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = E752X_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = drc_chan + 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(mc_num: 0, ARRAY_SIZE(layers), layers, sz_pvt: sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(3, "init mci\n" );
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->pdev = &pdev->dev;

	edac_dbg(3, "init pvt\n" );
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	/* Acquire references to dev0:fun0/fun1; bail if either is missing. */
	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	edac_dbg(3, "more mci init\n" );
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type. 1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(dev: pdev, E752X_DRM, val: &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;
	edac_dbg(3, "tolm, remapbase, remaplimit\n" );

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(dev: pdev, E752X_TOLM, val: &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(dev: pdev, E752X_REMAPBASE, val: &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(dev: pdev, E752X_REMAPLIMIT, val: &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n" ,
		pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller. The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n" );
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, info: &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(dev: &pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* Non-fatal: MC reporting still works without PCI control. */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n" , __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n" ,
			__func__);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n" );
	return 0;

fail:
	/* Release device references taken by e752x_get_devs(). */
	pci_dev_put(dev: pvt->dev_d0f0);
	pci_dev_put(dev: pvt->dev_d0f1);
	edac_mc_free(mci);

	return -ENODEV;
}
1383 | |
1384 | /* returns count (>= 0), or negative on error */ |
1385 | static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1386 | { |
1387 | edac_dbg(0, "\n" ); |
1388 | |
1389 | /* wake up and enable device */ |
1390 | if (pci_enable_device(dev: pdev) < 0) |
1391 | return -EIO; |
1392 | |
1393 | return e752x_probe1(pdev, dev_idx: ent->driver_data); |
1394 | } |
1395 | |
1396 | static void e752x_remove_one(struct pci_dev *pdev) |
1397 | { |
1398 | struct mem_ctl_info *mci; |
1399 | struct e752x_pvt *pvt; |
1400 | |
1401 | edac_dbg(0, "\n" ); |
1402 | |
1403 | if (e752x_pci) |
1404 | edac_pci_release_generic_ctl(pci: e752x_pci); |
1405 | |
1406 | if ((mci = edac_mc_del_mc(dev: &pdev->dev)) == NULL) |
1407 | return; |
1408 | |
1409 | pvt = (struct e752x_pvt *)mci->pvt_info; |
1410 | pci_dev_put(dev: pvt->dev_d0f0); |
1411 | pci_dev_put(dev: pvt->dev_d0f1); |
1412 | edac_mc_free(mci); |
1413 | } |
1414 | |
/* PCI ids this driver binds to; driver_data selects the e752x_devs entry. */
static const struct pci_device_id e752x_pci_tbl[] = {
	{
	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{
	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{
	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{
	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3100},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1434 | |
/* Hook probe/remove into the PCI core for the ids in e752x_pci_tbl. */
static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = e752x_remove_one,
	.id_table = e752x_pci_tbl,
};
1441 | |
1442 | static int __init e752x_init(void) |
1443 | { |
1444 | int pci_rc; |
1445 | |
1446 | edac_dbg(3, "\n" ); |
1447 | |
1448 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ |
1449 | opstate_init(); |
1450 | |
1451 | pci_rc = pci_register_driver(&e752x_driver); |
1452 | return (pci_rc < 0) ? pci_rc : 0; |
1453 | } |
1454 | |
/* Module teardown: unregister from the PCI core (triggers remove_one). */
static void __exit e752x_exit(void)
{
	edac_dbg(3, "\n" );
	pci_unregister_driver(dev: &e752x_driver);
}
1460 | |
module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL" );
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman" );
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers" );

/* See e752x_probe1: allows access to dev0:fun1 even if BIOS hid it. */
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		" 1=force unhide and hope BIOS doesn't fight driver for "
		"Dev0:Fun1 access" );

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI" );

/* Default -1 auto-detects from the CPU model string (Xeon only). */
module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		" 1=enable system bus parity checking, default=auto-detect" );
module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
		"reporting, 1=enable non-memory error reporting" );
1482 | |