1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Xilinx Versal memory controller driver |
4 | * Copyright (C) 2023 Advanced Micro Devices, Inc. |
5 | */ |
6 | #include <linux/bitfield.h> |
7 | #include <linux/edac.h> |
8 | #include <linux/interrupt.h> |
9 | #include <linux/module.h> |
10 | #include <linux/of.h> |
11 | #include <linux/of_address.h> |
12 | #include <linux/of_device.h> |
13 | #include <linux/platform_device.h> |
14 | #include <linux/sizes.h> |
15 | #include <linux/firmware/xlnx-zynqmp.h> |
16 | #include <linux/firmware/xlnx-event-manager.h> |
17 | |
18 | #include "edac_module.h" |
19 | |
20 | /* Granularity of reported error in bytes */ |
21 | #define XDDR_EDAC_ERR_GRAIN 1 |
22 | |
23 | #define XDDR_EDAC_MSG_SIZE 256 |
24 | #define EVENT 2 |
25 | |
26 | #define XDDR_PCSR_OFFSET 0xC |
27 | #define XDDR_ISR_OFFSET 0x14 |
28 | #define XDDR_IRQ_EN_OFFSET 0x20 |
29 | #define XDDR_IRQ1_EN_OFFSET 0x2C |
30 | #define XDDR_IRQ_DIS_OFFSET 0x24 |
31 | #define XDDR_IRQ_CE_MASK GENMASK(18, 15) |
32 | #define XDDR_IRQ_UE_MASK GENMASK(14, 11) |
33 | |
34 | #define XDDR_REG_CONFIG0_OFFSET 0x258 |
35 | #define XDDR_REG_CONFIG0_BUS_WIDTH_MASK GENMASK(19, 18) |
36 | #define XDDR_REG_CONFIG0_NUM_CHANS_MASK BIT(17) |
37 | #define XDDR_REG_CONFIG0_NUM_RANKS_MASK GENMASK(15, 14) |
38 | #define XDDR_REG_CONFIG0_SIZE_MASK GENMASK(10, 8) |
39 | |
40 | #define XDDR_REG_PINOUT_OFFSET 0x25C |
41 | #define XDDR_REG_PINOUT_ECC_EN_MASK GENMASK(7, 5) |
42 | |
43 | #define ECCW0_FLIP_CTRL 0x109C |
44 | #define ECCW0_FLIP0_OFFSET 0x10A0 |
45 | #define ECCW0_FLIP0_BITS 31 |
46 | #define ECCW0_FLIP1_OFFSET 0x10A4 |
47 | #define ECCW1_FLIP_CTRL 0x10AC |
48 | #define ECCW1_FLIP0_OFFSET 0x10B0 |
49 | #define ECCW1_FLIP1_OFFSET 0x10B4 |
50 | #define ECCR0_CERR_STAT_OFFSET 0x10BC |
51 | #define ECCR0_CE_ADDR_LO_OFFSET 0x10C0 |
52 | #define ECCR0_CE_ADDR_HI_OFFSET 0x10C4 |
53 | #define ECCR0_CE_DATA_LO_OFFSET 0x10C8 |
54 | #define ECCR0_CE_DATA_HI_OFFSET 0x10CC |
55 | #define ECCR0_CE_DATA_PAR_OFFSET 0x10D0 |
56 | |
57 | #define ECCR0_UERR_STAT_OFFSET 0x10D4 |
58 | #define ECCR0_UE_ADDR_LO_OFFSET 0x10D8 |
59 | #define ECCR0_UE_ADDR_HI_OFFSET 0x10DC |
60 | #define ECCR0_UE_DATA_LO_OFFSET 0x10E0 |
61 | #define ECCR0_UE_DATA_HI_OFFSET 0x10E4 |
62 | #define ECCR0_UE_DATA_PAR_OFFSET 0x10E8 |
63 | |
#define ECCR1_CERR_STAT_OFFSET			0x10F4
#define ECCR1_CE_ADDR_LO_OFFSET			0x10F8
#define ECCR1_CE_ADDR_HI_OFFSET			0x10FC
#define ECCR1_CE_DATA_LO_OFFSET			0x1100
/*
 * DATA_HI sits at LO + 4; 0x110C would overlap ECCR1_UERR_STAT below
 * and break the LO/HI/PAR +4 register sequence.
 */
#define ECCR1_CE_DATA_HI_OFFSET			0x1104
#define ECCR1_CE_DATA_PAR_OFFSET		0x1108

#define ECCR1_UERR_STAT_OFFSET			0x110C
#define ECCR1_UE_ADDR_LO_OFFSET			0x1110
#define ECCR1_UE_ADDR_HI_OFFSET			0x1114
#define ECCR1_UE_DATA_LO_OFFSET			0x1118
#define ECCR1_UE_DATA_HI_OFFSET			0x111C
#define ECCR1_UE_DATA_PAR_OFFSET		0x1120
77 | |
78 | #define XDDR_NOC_REG_ADEC4_OFFSET 0x44 |
79 | #define RANK_1_MASK GENMASK(11, 6) |
80 | #define LRANK_0_MASK GENMASK(17, 12) |
81 | #define LRANK_1_MASK GENMASK(23, 18) |
82 | #define MASK_24 GENMASK(29, 24) |
83 | |
84 | #define XDDR_NOC_REG_ADEC5_OFFSET 0x48 |
85 | #define XDDR_NOC_REG_ADEC6_OFFSET 0x4C |
86 | #define XDDR_NOC_REG_ADEC7_OFFSET 0x50 |
87 | #define XDDR_NOC_REG_ADEC8_OFFSET 0x54 |
88 | #define XDDR_NOC_REG_ADEC9_OFFSET 0x58 |
89 | #define XDDR_NOC_REG_ADEC10_OFFSET 0x5C |
90 | |
91 | #define XDDR_NOC_REG_ADEC11_OFFSET 0x60 |
92 | #define MASK_0 GENMASK(5, 0) |
93 | #define GRP_0_MASK GENMASK(11, 6) |
94 | #define GRP_1_MASK GENMASK(17, 12) |
95 | #define CH_0_MASK GENMASK(23, 18) |
96 | |
97 | #define XDDR_NOC_REG_ADEC12_OFFSET 0x71C |
98 | #define XDDR_NOC_REG_ADEC13_OFFSET 0x720 |
99 | |
100 | #define XDDR_NOC_REG_ADEC14_OFFSET 0x724 |
101 | #define XDDR_NOC_ROW_MATCH_MASK GENMASK(17, 0) |
102 | #define XDDR_NOC_COL_MATCH_MASK GENMASK(27, 18) |
103 | #define XDDR_NOC_BANK_MATCH_MASK GENMASK(29, 28) |
104 | #define XDDR_NOC_GRP_MATCH_MASK GENMASK(31, 30) |
105 | |
106 | #define XDDR_NOC_REG_ADEC15_OFFSET 0x728 |
107 | #define XDDR_NOC_RANK_MATCH_MASK GENMASK(1, 0) |
108 | #define XDDR_NOC_LRANK_MATCH_MASK GENMASK(4, 2) |
109 | #define XDDR_NOC_CH_MATCH_MASK BIT(5) |
110 | #define XDDR_NOC_MOD_SEL_MASK BIT(6) |
111 | #define XDDR_NOC_MATCH_EN_MASK BIT(8) |
112 | |
113 | #define ECCR_UE_CE_ADDR_HI_ROW_MASK GENMASK(7, 0) |
114 | |
115 | #define XDDR_EDAC_NR_CSROWS 1 |
116 | #define XDDR_EDAC_NR_CHANS 1 |
117 | |
118 | #define XDDR_BUS_WIDTH_64 0 |
119 | #define XDDR_BUS_WIDTH_32 1 |
120 | #define XDDR_BUS_WIDTH_16 2 |
121 | |
122 | #define XDDR_MAX_ROW_CNT 18 |
123 | #define XDDR_MAX_COL_CNT 10 |
124 | #define XDDR_MAX_RANK_CNT 2 |
125 | #define XDDR_MAX_LRANK_CNT 3 |
126 | #define XDDR_MAX_BANK_CNT 2 |
127 | #define XDDR_MAX_GRP_CNT 2 |
128 | |
129 | /* |
130 | * Config and system registers are usually locked. This is the |
131 | * code which unlocks them in order to accept writes. See |
132 | * |
133 | * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register |
134 | */ |
135 | #define PCSR_UNLOCK_VAL 0xF9E8D7C6 |
136 | #define PCSR_LOCK_VAL 1 |
137 | #define XDDR_ERR_TYPE_CE 0 |
138 | #define XDDR_ERR_TYPE_UE 1 |
139 | |
140 | #define XILINX_DRAM_SIZE_4G 0 |
141 | #define XILINX_DRAM_SIZE_6G 1 |
142 | #define XILINX_DRAM_SIZE_8G 2 |
143 | #define XILINX_DRAM_SIZE_12G 3 |
144 | #define XILINX_DRAM_SIZE_16G 4 |
145 | #define XILINX_DRAM_SIZE_32G 5 |
146 | #define NUM_UE_BITPOS 2 |
147 | |
148 | /** |
149 | * struct ecc_error_info - ECC error log information. |
150 | * @burstpos: Burst position. |
151 | * @lrank: Logical Rank number. |
152 | * @rank: Rank number. |
153 | * @group: Group number. |
154 | * @bank: Bank number. |
155 | * @col: Column number. |
156 | * @row: Row number. |
157 | * @rowhi: Row number higher bits. |
158 | * @i: ECC error info. |
159 | */ |
160 | union ecc_error_info { |
161 | struct { |
162 | u32 burstpos:3; |
163 | u32 lrank:3; |
164 | u32 rank:2; |
165 | u32 group:2; |
166 | u32 bank:2; |
167 | u32 col:10; |
168 | u32 row:10; |
169 | u32 rowhi; |
170 | }; |
171 | u64 i; |
172 | } __packed; |
173 | |
/*
 * union edac_info - Decoded view of one ADEC address-map register.
 *
 * Each ADEC register packs five 6-bit address-bit positions plus two
 * reserved bits into 32 bits.  The two anonymous structs alias the same
 * storage: one names the fields for row-map registers, the other for
 * column-map registers.  @i is the raw register value.
 */
union edac_info {
	struct {
		u32 row0:6;
		u32 row1:6;
		u32 row2:6;
		u32 row3:6;
		u32 row4:6;
		u32 reserved:2;
	};
	struct {
		u32 col1:6;
		u32 col2:6;
		u32 col3:6;
		u32 col4:6;
		u32 col5:6;
		u32 reservedcol:2;
	};
	u32 i;
} __packed;
193 | |
194 | /** |
195 | * struct ecc_status - ECC status information to report. |
196 | * @ceinfo: Correctable error log information. |
197 | * @ueinfo: Uncorrectable error log information. |
198 | * @channel: Channel number. |
199 | * @error_type: Error type information. |
200 | */ |
201 | struct ecc_status { |
202 | union ecc_error_info ceinfo[2]; |
203 | union ecc_error_info ueinfo[2]; |
204 | u8 channel; |
205 | u8 error_type; |
206 | }; |
207 | |
208 | /** |
209 | * struct edac_priv - DDR memory controller private instance data. |
210 | * @ddrmc_baseaddr: Base address of the DDR controller. |
211 | * @ddrmc_noc_baseaddr: Base address of the DDRMC NOC. |
212 | * @message: Buffer for framing the event specific info. |
213 | * @mc_id: Memory controller ID. |
214 | * @ce_cnt: Correctable error count. |
215 | * @ue_cnt: UnCorrectable error count. |
216 | * @stat: ECC status information. |
217 | * @lrank_bit: Bit shifts for lrank bit. |
218 | * @rank_bit: Bit shifts for rank bit. |
219 | * @row_bit: Bit shifts for row bit. |
220 | * @col_bit: Bit shifts for column bit. |
221 | * @bank_bit: Bit shifts for bank bit. |
222 | * @grp_bit: Bit shifts for group bit. |
223 | * @ch_bit: Bit shifts for channel bit. |
224 | * @err_inject_addr: Data poison address. |
225 | * @debugfs: Debugfs handle. |
226 | */ |
227 | struct edac_priv { |
228 | void __iomem *ddrmc_baseaddr; |
229 | void __iomem *ddrmc_noc_baseaddr; |
230 | char message[XDDR_EDAC_MSG_SIZE]; |
231 | u32 mc_id; |
232 | u32 ce_cnt; |
233 | u32 ue_cnt; |
234 | struct ecc_status stat; |
235 | u32 lrank_bit[3]; |
236 | u32 rank_bit[2]; |
237 | u32 row_bit[18]; |
238 | u32 col_bit[10]; |
239 | u32 bank_bit[2]; |
240 | u32 grp_bit[2]; |
241 | u32 ch_bit; |
242 | #ifdef CONFIG_EDAC_DEBUG |
243 | u64 err_inject_addr; |
244 | struct dentry *debugfs; |
245 | #endif |
246 | }; |
247 | |
248 | static void get_ce_error_info(struct edac_priv *priv) |
249 | { |
250 | void __iomem *ddrmc_base; |
251 | struct ecc_status *p; |
252 | u32 regval; |
253 | u64 reghi; |
254 | |
255 | ddrmc_base = priv->ddrmc_baseaddr; |
256 | p = &priv->stat; |
257 | |
258 | p->error_type = XDDR_ERR_TYPE_CE; |
259 | regval = readl(addr: ddrmc_base + ECCR0_CE_ADDR_LO_OFFSET); |
260 | reghi = regval & ECCR_UE_CE_ADDR_HI_ROW_MASK; |
261 | p->ceinfo[0].i = regval | reghi << 32; |
262 | regval = readl(addr: ddrmc_base + ECCR0_CE_ADDR_HI_OFFSET); |
263 | |
264 | edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n" , |
265 | readl(ddrmc_base + ECCR0_CE_DATA_LO_OFFSET), |
266 | readl(ddrmc_base + ECCR0_CE_DATA_HI_OFFSET), |
267 | readl(ddrmc_base + ECCR0_CE_DATA_PAR_OFFSET)); |
268 | |
269 | regval = readl(addr: ddrmc_base + ECCR1_CE_ADDR_LO_OFFSET); |
270 | reghi = readl(addr: ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET); |
271 | p->ceinfo[1].i = regval | reghi << 32; |
272 | regval = readl(addr: ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET); |
273 | |
274 | edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n" , |
275 | readl(ddrmc_base + ECCR1_CE_DATA_LO_OFFSET), |
276 | readl(ddrmc_base + ECCR1_CE_DATA_HI_OFFSET), |
277 | readl(ddrmc_base + ECCR1_CE_DATA_PAR_OFFSET)); |
278 | } |
279 | |
280 | static void get_ue_error_info(struct edac_priv *priv) |
281 | { |
282 | void __iomem *ddrmc_base; |
283 | struct ecc_status *p; |
284 | u32 regval; |
285 | u64 reghi; |
286 | |
287 | ddrmc_base = priv->ddrmc_baseaddr; |
288 | p = &priv->stat; |
289 | |
290 | p->error_type = XDDR_ERR_TYPE_UE; |
291 | regval = readl(addr: ddrmc_base + ECCR0_UE_ADDR_LO_OFFSET); |
292 | reghi = readl(addr: ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET); |
293 | |
294 | p->ueinfo[0].i = regval | reghi << 32; |
295 | regval = readl(addr: ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET); |
296 | |
297 | edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n" , |
298 | readl(ddrmc_base + ECCR0_UE_DATA_LO_OFFSET), |
299 | readl(ddrmc_base + ECCR0_UE_DATA_HI_OFFSET), |
300 | readl(ddrmc_base + ECCR0_UE_DATA_PAR_OFFSET)); |
301 | |
302 | regval = readl(addr: ddrmc_base + ECCR1_UE_ADDR_LO_OFFSET); |
303 | reghi = readl(addr: ddrmc_base + ECCR1_UE_ADDR_HI_OFFSET); |
304 | p->ueinfo[1].i = regval | reghi << 32; |
305 | |
306 | edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n" , |
307 | readl(ddrmc_base + ECCR1_UE_DATA_LO_OFFSET), |
308 | readl(ddrmc_base + ECCR1_UE_DATA_HI_OFFSET), |
309 | readl(ddrmc_base + ECCR1_UE_DATA_PAR_OFFSET)); |
310 | } |
311 | |
312 | static bool get_error_info(struct edac_priv *priv) |
313 | { |
314 | u32 eccr0_ceval, eccr1_ceval, eccr0_ueval, eccr1_ueval; |
315 | void __iomem *ddrmc_base; |
316 | struct ecc_status *p; |
317 | |
318 | ddrmc_base = priv->ddrmc_baseaddr; |
319 | p = &priv->stat; |
320 | |
321 | eccr0_ceval = readl(addr: ddrmc_base + ECCR0_CERR_STAT_OFFSET); |
322 | eccr1_ceval = readl(addr: ddrmc_base + ECCR1_CERR_STAT_OFFSET); |
323 | eccr0_ueval = readl(addr: ddrmc_base + ECCR0_UERR_STAT_OFFSET); |
324 | eccr1_ueval = readl(addr: ddrmc_base + ECCR1_UERR_STAT_OFFSET); |
325 | |
326 | if (!eccr0_ceval && !eccr1_ceval && !eccr0_ueval && !eccr1_ueval) |
327 | return 1; |
328 | if (!eccr0_ceval) |
329 | p->channel = 1; |
330 | else |
331 | p->channel = 0; |
332 | |
333 | if (eccr0_ceval || eccr1_ceval) |
334 | get_ce_error_info(priv); |
335 | |
336 | if (eccr0_ueval || eccr1_ueval) { |
337 | if (!eccr0_ueval) |
338 | p->channel = 1; |
339 | else |
340 | p->channel = 0; |
341 | get_ue_error_info(priv); |
342 | } |
343 | |
344 | /* Unlock the PCSR registers */ |
345 | writel(PCSR_UNLOCK_VAL, addr: ddrmc_base + XDDR_PCSR_OFFSET); |
346 | |
347 | writel(val: 0, addr: ddrmc_base + ECCR0_CERR_STAT_OFFSET); |
348 | writel(val: 0, addr: ddrmc_base + ECCR1_CERR_STAT_OFFSET); |
349 | writel(val: 0, addr: ddrmc_base + ECCR0_UERR_STAT_OFFSET); |
350 | writel(val: 0, addr: ddrmc_base + ECCR1_UERR_STAT_OFFSET); |
351 | |
352 | /* Lock the PCSR registers */ |
353 | writel(val: 1, addr: ddrmc_base + XDDR_PCSR_OFFSET); |
354 | |
355 | return 0; |
356 | } |
357 | |
358 | /** |
359 | * convert_to_physical - Convert to physical address. |
360 | * @priv: DDR memory controller private instance data. |
361 | * @pinf: ECC error info structure. |
362 | * |
363 | * Return: Physical address of the DDR memory. |
364 | */ |
365 | static unsigned long convert_to_physical(struct edac_priv *priv, union ecc_error_info pinf) |
366 | { |
367 | unsigned long err_addr = 0; |
368 | u32 index; |
369 | u32 row; |
370 | |
371 | row = pinf.rowhi << 10 | pinf.row; |
372 | for (index = 0; index < XDDR_MAX_ROW_CNT; index++) { |
373 | err_addr |= (row & BIT(0)) << priv->row_bit[index]; |
374 | row >>= 1; |
375 | } |
376 | |
377 | for (index = 0; index < XDDR_MAX_COL_CNT; index++) { |
378 | err_addr |= (pinf.col & BIT(0)) << priv->col_bit[index]; |
379 | pinf.col >>= 1; |
380 | } |
381 | |
382 | for (index = 0; index < XDDR_MAX_BANK_CNT; index++) { |
383 | err_addr |= (pinf.bank & BIT(0)) << priv->bank_bit[index]; |
384 | pinf.bank >>= 1; |
385 | } |
386 | |
387 | for (index = 0; index < XDDR_MAX_GRP_CNT; index++) { |
388 | err_addr |= (pinf.group & BIT(0)) << priv->grp_bit[index]; |
389 | pinf.group >>= 1; |
390 | } |
391 | |
392 | for (index = 0; index < XDDR_MAX_RANK_CNT; index++) { |
393 | err_addr |= (pinf.rank & BIT(0)) << priv->rank_bit[index]; |
394 | pinf.rank >>= 1; |
395 | } |
396 | |
397 | for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) { |
398 | err_addr |= (pinf.lrank & BIT(0)) << priv->lrank_bit[index]; |
399 | pinf.lrank >>= 1; |
400 | } |
401 | |
402 | err_addr |= (priv->stat.channel & BIT(0)) << priv->ch_bit; |
403 | |
404 | return err_addr; |
405 | } |
406 | |
407 | /** |
408 | * handle_error - Handle Correctable and Uncorrectable errors. |
409 | * @mci: EDAC memory controller instance. |
410 | * @stat: ECC status structure. |
411 | * |
412 | * Handles ECC correctable and uncorrectable errors. |
413 | */ |
414 | static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat) |
415 | { |
416 | struct edac_priv *priv = mci->pvt_info; |
417 | union ecc_error_info pinf; |
418 | |
419 | if (stat->error_type == XDDR_ERR_TYPE_CE) { |
420 | priv->ce_cnt++; |
421 | pinf = stat->ceinfo[stat->channel]; |
422 | snprintf(buf: priv->message, XDDR_EDAC_MSG_SIZE, |
423 | fmt: "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n" , |
424 | "CE" , priv->mc_id, |
425 | convert_to_physical(priv, pinf), pinf.burstpos); |
426 | |
427 | edac_mc_handle_error(type: HW_EVENT_ERR_CORRECTED, mci, |
428 | error_count: priv->ce_cnt, page_frame_number: 0, offset_in_page: 0, syndrome: 0, top_layer: 0, mid_layer: 0, low_layer: -1, |
429 | msg: priv->message, other_detail: "" ); |
430 | } |
431 | |
432 | if (stat->error_type == XDDR_ERR_TYPE_UE) { |
433 | priv->ue_cnt++; |
434 | pinf = stat->ueinfo[stat->channel]; |
435 | snprintf(buf: priv->message, XDDR_EDAC_MSG_SIZE, |
436 | fmt: "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n" , |
437 | "UE" , priv->mc_id, |
438 | convert_to_physical(priv, pinf), pinf.burstpos); |
439 | |
440 | edac_mc_handle_error(type: HW_EVENT_ERR_UNCORRECTED, mci, |
441 | error_count: priv->ue_cnt, page_frame_number: 0, offset_in_page: 0, syndrome: 0, top_layer: 0, mid_layer: 0, low_layer: -1, |
442 | msg: priv->message, other_detail: "" ); |
443 | } |
444 | |
445 | memset(stat, 0, sizeof(*stat)); |
446 | } |
447 | |
448 | /** |
449 | * err_callback - Handle Correctable and Uncorrectable errors. |
450 | * @payload: payload data. |
451 | * @data: mci controller data. |
452 | * |
453 | * Handles ECC correctable and uncorrectable errors. |
454 | */ |
455 | static void err_callback(const u32 *payload, void *data) |
456 | { |
457 | struct mem_ctl_info *mci = (struct mem_ctl_info *)data; |
458 | struct edac_priv *priv; |
459 | struct ecc_status *p; |
460 | int regval; |
461 | |
462 | priv = mci->pvt_info; |
463 | p = &priv->stat; |
464 | |
465 | regval = readl(addr: priv->ddrmc_baseaddr + XDDR_ISR_OFFSET); |
466 | |
467 | if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_CR) |
468 | p->error_type = XDDR_ERR_TYPE_CE; |
469 | if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_NCR) |
470 | p->error_type = XDDR_ERR_TYPE_UE; |
471 | |
472 | if (get_error_info(priv)) |
473 | return; |
474 | |
475 | handle_error(mci, stat: &priv->stat); |
476 | |
477 | /* Unlock the PCSR registers */ |
478 | writel(PCSR_UNLOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
479 | |
480 | /* Clear the ISR */ |
481 | writel(val: regval, addr: priv->ddrmc_baseaddr + XDDR_ISR_OFFSET); |
482 | |
483 | /* Lock the PCSR registers */ |
484 | writel(PCSR_LOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
485 | edac_dbg(3, "Total error count CE %d UE %d\n" , |
486 | priv->ce_cnt, priv->ue_cnt); |
487 | } |
488 | |
489 | /** |
490 | * get_dwidth - Return the controller memory width. |
491 | * @base: DDR memory controller base address. |
492 | * |
493 | * Get the EDAC device type width appropriate for the controller |
494 | * configuration. |
495 | * |
496 | * Return: a device type width enumeration. |
497 | */ |
498 | static enum dev_type get_dwidth(const void __iomem *base) |
499 | { |
500 | enum dev_type dt; |
501 | u32 regval; |
502 | u32 width; |
503 | |
504 | regval = readl(addr: base + XDDR_REG_CONFIG0_OFFSET); |
505 | width = FIELD_GET(XDDR_REG_CONFIG0_BUS_WIDTH_MASK, regval); |
506 | |
507 | switch (width) { |
508 | case XDDR_BUS_WIDTH_16: |
509 | dt = DEV_X2; |
510 | break; |
511 | case XDDR_BUS_WIDTH_32: |
512 | dt = DEV_X4; |
513 | break; |
514 | case XDDR_BUS_WIDTH_64: |
515 | dt = DEV_X8; |
516 | break; |
517 | default: |
518 | dt = DEV_UNKNOWN; |
519 | } |
520 | |
521 | return dt; |
522 | } |
523 | |
524 | /** |
525 | * get_ecc_state - Return the controller ECC enable/disable status. |
526 | * @base: DDR memory controller base address. |
527 | * |
528 | * Get the ECC enable/disable status for the controller. |
529 | * |
530 | * Return: a ECC status boolean i.e true/false - enabled/disabled. |
531 | */ |
532 | static bool get_ecc_state(void __iomem *base) |
533 | { |
534 | enum dev_type dt; |
535 | u32 ecctype; |
536 | |
537 | dt = get_dwidth(base); |
538 | if (dt == DEV_UNKNOWN) |
539 | return false; |
540 | |
541 | ecctype = readl(addr: base + XDDR_REG_PINOUT_OFFSET); |
542 | ecctype &= XDDR_REG_PINOUT_ECC_EN_MASK; |
543 | |
544 | return !!ecctype; |
545 | } |
546 | |
547 | /** |
548 | * get_memsize - Get the size of the attached memory device. |
549 | * @priv: DDR memory controller private instance data. |
550 | * |
551 | * Return: the memory size in bytes. |
552 | */ |
553 | static u64 get_memsize(struct edac_priv *priv) |
554 | { |
555 | u32 regval; |
556 | u64 size; |
557 | |
558 | regval = readl(addr: priv->ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET); |
559 | regval = FIELD_GET(XDDR_REG_CONFIG0_SIZE_MASK, regval); |
560 | |
561 | switch (regval) { |
562 | case XILINX_DRAM_SIZE_4G: |
563 | size = 4U; break; |
564 | case XILINX_DRAM_SIZE_6G: |
565 | size = 6U; break; |
566 | case XILINX_DRAM_SIZE_8G: |
567 | size = 8U; break; |
568 | case XILINX_DRAM_SIZE_12G: |
569 | size = 12U; break; |
570 | case XILINX_DRAM_SIZE_16G: |
571 | size = 16U; break; |
572 | case XILINX_DRAM_SIZE_32G: |
573 | size = 32U; break; |
574 | /* Invalid configuration */ |
575 | default: |
576 | size = 0; break; |
577 | } |
578 | |
579 | size *= SZ_1G; |
580 | return size; |
581 | } |
582 | |
583 | /** |
584 | * init_csrows - Initialize the csrow data. |
585 | * @mci: EDAC memory controller instance. |
586 | * |
587 | * Initialize the chip select rows associated with the EDAC memory |
588 | * controller instance. |
589 | */ |
590 | static void init_csrows(struct mem_ctl_info *mci) |
591 | { |
592 | struct edac_priv *priv = mci->pvt_info; |
593 | struct csrow_info *csi; |
594 | struct dimm_info *dimm; |
595 | unsigned long size; |
596 | u32 row; |
597 | int ch; |
598 | |
599 | size = get_memsize(priv); |
600 | for (row = 0; row < mci->nr_csrows; row++) { |
601 | csi = mci->csrows[row]; |
602 | for (ch = 0; ch < csi->nr_channels; ch++) { |
603 | dimm = csi->channels[ch]->dimm; |
604 | dimm->edac_mode = EDAC_SECDED; |
605 | dimm->mtype = MEM_DDR4; |
606 | dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; |
607 | dimm->grain = XDDR_EDAC_ERR_GRAIN; |
608 | dimm->dtype = get_dwidth(base: priv->ddrmc_baseaddr); |
609 | } |
610 | } |
611 | } |
612 | |
613 | /** |
614 | * mc_init - Initialize one driver instance. |
615 | * @mci: EDAC memory controller instance. |
616 | * @pdev: platform device. |
617 | * |
618 | * Perform initialization of the EDAC memory controller instance and |
619 | * related driver-private data associated with the memory controller the |
620 | * instance is bound to. |
621 | */ |
622 | static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) |
623 | { |
624 | mci->pdev = &pdev->dev; |
625 | platform_set_drvdata(pdev, data: mci); |
626 | |
627 | /* Initialize controller capabilities and configuration */ |
628 | mci->mtype_cap = MEM_FLAG_DDR4; |
629 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
630 | mci->scrub_cap = SCRUB_HW_SRC; |
631 | mci->scrub_mode = SCRUB_NONE; |
632 | |
633 | mci->edac_cap = EDAC_FLAG_SECDED; |
634 | mci->ctl_name = "xlnx_ddr_controller" ; |
635 | mci->dev_name = dev_name(dev: &pdev->dev); |
636 | mci->mod_name = "xlnx_edac" ; |
637 | |
638 | edac_op_state = EDAC_OPSTATE_INT; |
639 | |
640 | init_csrows(mci); |
641 | } |
642 | |
/*
 * Enable CE/UE error interrupts.  The interrupt-enable registers sit
 * behind the PCSR lock, so the sequence is unlock, program, re-lock.
 */
static void enable_intr(struct edac_priv *priv)
{
	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	/* Enable UE and CE Interrupts to support the interrupt case */
	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ_EN_OFFSET);

	/* Only UE interrupts are routed through the second IRQ line */
	writel(XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
657 | |
/*
 * Disable CE/UE error interrupts, bracketing the write with the PCSR
 * unlock/lock sequence required for register access.
 */
static void disable_intr(struct edac_priv *priv)
{
	/* Unlock the PCSR registers */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

	/* Disable UE/CE Interrupts */
	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
	       priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);

	/* Lock the PCSR registers */
	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
670 | |
671 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) |
672 | |
673 | #ifdef CONFIG_EDAC_DEBUG |
674 | /** |
675 | * poison_setup - Update poison registers. |
676 | * @priv: DDR memory controller private instance data. |
677 | * |
678 | * Update poison registers as per DDR mapping upon write of the address |
679 | * location the fault is injected. |
680 | * Return: none. |
681 | */ |
682 | static void poison_setup(struct edac_priv *priv) |
683 | { |
684 | u32 col = 0, row = 0, bank = 0, grp = 0, rank = 0, lrank = 0, ch = 0; |
685 | u32 index, regval; |
686 | |
687 | for (index = 0; index < XDDR_MAX_ROW_CNT; index++) { |
688 | row |= (((priv->err_inject_addr >> priv->row_bit[index]) & |
689 | BIT(0)) << index); |
690 | } |
691 | |
692 | for (index = 0; index < XDDR_MAX_COL_CNT; index++) { |
693 | col |= (((priv->err_inject_addr >> priv->col_bit[index]) & |
694 | BIT(0)) << index); |
695 | } |
696 | |
697 | for (index = 0; index < XDDR_MAX_BANK_CNT; index++) { |
698 | bank |= (((priv->err_inject_addr >> priv->bank_bit[index]) & |
699 | BIT(0)) << index); |
700 | } |
701 | |
702 | for (index = 0; index < XDDR_MAX_GRP_CNT; index++) { |
703 | grp |= (((priv->err_inject_addr >> priv->grp_bit[index]) & |
704 | BIT(0)) << index); |
705 | } |
706 | |
707 | for (index = 0; index < XDDR_MAX_RANK_CNT; index++) { |
708 | rank |= (((priv->err_inject_addr >> priv->rank_bit[index]) & |
709 | BIT(0)) << index); |
710 | } |
711 | |
712 | for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) { |
713 | lrank |= (((priv->err_inject_addr >> priv->lrank_bit[index]) & |
714 | BIT(0)) << index); |
715 | } |
716 | |
717 | ch = (priv->err_inject_addr >> priv->ch_bit) & BIT(0); |
718 | if (ch) |
719 | writel(val: 0xFF, addr: priv->ddrmc_baseaddr + ECCW1_FLIP_CTRL); |
720 | else |
721 | writel(val: 0xFF, addr: priv->ddrmc_baseaddr + ECCW0_FLIP_CTRL); |
722 | |
723 | writel(val: 0, addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC12_OFFSET); |
724 | writel(val: 0, addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC13_OFFSET); |
725 | |
726 | regval = row & XDDR_NOC_ROW_MATCH_MASK; |
727 | regval |= FIELD_PREP(XDDR_NOC_COL_MATCH_MASK, col); |
728 | regval |= FIELD_PREP(XDDR_NOC_BANK_MATCH_MASK, bank); |
729 | regval |= FIELD_PREP(XDDR_NOC_GRP_MATCH_MASK, grp); |
730 | writel(val: regval, addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC14_OFFSET); |
731 | |
732 | regval = rank & XDDR_NOC_RANK_MATCH_MASK; |
733 | regval |= FIELD_PREP(XDDR_NOC_LRANK_MATCH_MASK, lrank); |
734 | regval |= FIELD_PREP(XDDR_NOC_CH_MATCH_MASK, ch); |
735 | regval |= (XDDR_NOC_MOD_SEL_MASK | XDDR_NOC_MATCH_EN_MASK); |
736 | writel(val: regval, addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET); |
737 | } |
738 | |
739 | static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos) |
740 | { |
741 | u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1; |
742 | struct edac_priv *priv = mci->pvt_info; |
743 | |
744 | if (ce_bitpos < ECCW0_FLIP0_BITS) { |
745 | ecc0_flip0 = BIT(ce_bitpos); |
746 | ecc1_flip0 = BIT(ce_bitpos); |
747 | ecc0_flip1 = 0; |
748 | ecc1_flip1 = 0; |
749 | } else { |
750 | ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS; |
751 | ecc0_flip1 = BIT(ce_bitpos); |
752 | ecc1_flip1 = BIT(ce_bitpos); |
753 | ecc0_flip0 = 0; |
754 | ecc1_flip0 = 0; |
755 | } |
756 | |
757 | writel(val: ecc0_flip0, addr: priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET); |
758 | writel(val: ecc1_flip0, addr: priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET); |
759 | writel(val: ecc0_flip1, addr: priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET); |
760 | writel(val: ecc1_flip1, addr: priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); |
761 | } |
762 | |
763 | /* |
764 | * To inject a correctable error, the following steps are needed: |
765 | * |
766 | * - Write the correctable error bit position value: |
767 | * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce |
768 | * |
769 | * poison_setup() derives the row, column, bank, group and rank and |
770 | * writes to the ADEC registers based on the address given by the user. |
771 | * |
772 | * The ADEC12 and ADEC13 are mask registers; write 0 to make sure default |
773 | * configuration is there and no addresses are masked. |
774 | * |
775 | * The row, column, bank, group and rank registers are written to the |
776 | * match ADEC bit to generate errors at the particular address. ADEC14 |
777 | * and ADEC15 have the match bits. |
778 | * |
779 | * xddr_inject_data_ce_store() updates the ECC FLIP registers with the |
780 | * bits to be corrupted based on the bit position given by the user. |
781 | * |
782 | * Upon doing a read to the address the errors are injected. |
783 | */ |
784 | static ssize_t inject_data_ce_store(struct file *file, const char __user *data, |
785 | size_t count, loff_t *ppos) |
786 | { |
787 | struct device *dev = file->private_data; |
788 | struct mem_ctl_info *mci = to_mci(dev); |
789 | struct edac_priv *priv = mci->pvt_info; |
790 | u8 ce_bitpos; |
791 | int ret; |
792 | |
793 | ret = kstrtou8_from_user(s: data, count, base: 0, res: &ce_bitpos); |
794 | if (ret) |
795 | return ret; |
796 | |
797 | /* Unlock the PCSR registers */ |
798 | writel(PCSR_UNLOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
799 | writel(PCSR_UNLOCK_VAL, addr: priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); |
800 | |
801 | poison_setup(priv); |
802 | |
803 | xddr_inject_data_ce_store(mci, ce_bitpos); |
804 | ret = count; |
805 | |
806 | /* Lock the PCSR registers */ |
807 | writel(PCSR_LOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
808 | writel(PCSR_LOCK_VAL, addr: priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); |
809 | |
810 | return ret; |
811 | } |
812 | |
/* debugfs file ops: write-only correctable-error injection hook */
static const struct file_operations xddr_inject_ce_fops = {
	.open = simple_open,
	.write = inject_data_ce_store,
	.llseek = generic_file_llseek,
};
818 | |
819 | static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1) |
820 | { |
821 | struct edac_priv *priv = mci->pvt_info; |
822 | |
823 | writel(val: val0, addr: priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET); |
824 | writel(val: val0, addr: priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET); |
825 | writel(val: val1, addr: priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); |
826 | writel(val: val1, addr: priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); |
827 | } |
828 | |
829 | /* |
830 | * To inject an uncorrectable error, the following steps are needed: |
831 | * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ue |
832 | * |
833 | * poison_setup() derives the row, column, bank, group and rank and |
834 | * writes to the ADEC registers based on the address given by the user. |
835 | * |
836 | * The ADEC12 and ADEC13 are mask registers; write 0 so that none of the |
837 | * addresses are masked. The row, column, bank, group and rank registers |
838 | * are written to the match ADEC bit to generate errors at the |
839 | * particular address. ADEC14 and ADEC15 have the match bits. |
840 | * |
841 | * xddr_inject_data_ue_store() updates the ECC FLIP registers with the |
842 | * bits to be corrupted based on the bit position given by the user. For |
843 | * uncorrectable errors |
844 | * 2 bit errors are injected. |
845 | * |
846 | * Upon doing a read to the address the errors are injected. |
847 | */ |
848 | static ssize_t inject_data_ue_store(struct file *file, const char __user *data, |
849 | size_t count, loff_t *ppos) |
850 | { |
851 | struct device *dev = file->private_data; |
852 | struct mem_ctl_info *mci = to_mci(dev); |
853 | struct edac_priv *priv = mci->pvt_info; |
854 | char buf[6], *pbuf, *token[2]; |
855 | u32 val0 = 0, val1 = 0; |
856 | u8 len, ue0, ue1; |
857 | int i, ret; |
858 | |
859 | len = min_t(size_t, count, sizeof(buf)); |
860 | if (copy_from_user(to: buf, from: data, n: len)) |
861 | return -EFAULT; |
862 | |
863 | buf[len] = '\0'; |
864 | pbuf = &buf[0]; |
865 | for (i = 0; i < NUM_UE_BITPOS; i++) |
866 | token[i] = strsep(&pbuf, "," ); |
867 | |
868 | ret = kstrtou8(s: token[0], base: 0, res: &ue0); |
869 | if (ret) |
870 | return ret; |
871 | |
872 | ret = kstrtou8(s: token[1], base: 0, res: &ue1); |
873 | if (ret) |
874 | return ret; |
875 | |
876 | if (ue0 < ECCW0_FLIP0_BITS) { |
877 | val0 = BIT(ue0); |
878 | } else { |
879 | ue0 = ue0 - ECCW0_FLIP0_BITS; |
880 | val1 = BIT(ue0); |
881 | } |
882 | |
883 | if (ue1 < ECCW0_FLIP0_BITS) { |
884 | val0 |= BIT(ue1); |
885 | } else { |
886 | ue1 = ue1 - ECCW0_FLIP0_BITS; |
887 | val1 |= BIT(ue1); |
888 | } |
889 | |
890 | /* Unlock the PCSR registers */ |
891 | writel(PCSR_UNLOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
892 | writel(PCSR_UNLOCK_VAL, addr: priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); |
893 | |
894 | poison_setup(priv); |
895 | |
896 | xddr_inject_data_ue_store(mci, val0, val1); |
897 | |
898 | /* Lock the PCSR registers */ |
899 | writel(PCSR_LOCK_VAL, addr: priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); |
900 | writel(PCSR_LOCK_VAL, addr: priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); |
901 | return count; |
902 | } |
903 | |
/* debugfs write-only interface for injecting uncorrectable (2-bit) errors. */
static const struct file_operations xddr_inject_ue_fops = {
	.open = simple_open,
	.write = inject_data_ue_store,
	.llseek = generic_file_llseek,
};
909 | |
910 | static void create_debugfs_attributes(struct mem_ctl_info *mci) |
911 | { |
912 | struct edac_priv *priv = mci->pvt_info; |
913 | |
914 | priv->debugfs = edac_debugfs_create_dir(dirname: mci->dev_name); |
915 | if (!priv->debugfs) |
916 | return; |
917 | |
918 | if (!edac_debugfs_create_file(name: "inject_ce" , mode: 0200, parent: priv->debugfs, |
919 | data: &mci->dev, fops: &xddr_inject_ce_fops)) { |
920 | debugfs_remove_recursive(dentry: priv->debugfs); |
921 | return; |
922 | } |
923 | |
924 | if (!edac_debugfs_create_file(name: "inject_ue" , mode: 0200, parent: priv->debugfs, |
925 | data: &mci->dev, fops: &xddr_inject_ue_fops)) { |
926 | debugfs_remove_recursive(dentry: priv->debugfs); |
927 | return; |
928 | } |
929 | debugfs_create_x64(name: "address" , mode: 0600, parent: priv->debugfs, |
930 | value: &priv->err_inject_addr); |
931 | mci->debugfs = priv->debugfs; |
932 | } |
933 | |
934 | static inline void process_bit(struct edac_priv *priv, unsigned int start, u32 regval) |
935 | { |
936 | union edac_info rows; |
937 | |
938 | rows.i = regval; |
939 | priv->row_bit[start] = rows.row0; |
940 | priv->row_bit[start + 1] = rows.row1; |
941 | priv->row_bit[start + 2] = rows.row2; |
942 | priv->row_bit[start + 3] = rows.row3; |
943 | priv->row_bit[start + 4] = rows.row4; |
944 | } |
945 | |
946 | static void setup_row_address_map(struct edac_priv *priv) |
947 | { |
948 | u32 regval; |
949 | union edac_info rows; |
950 | |
951 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC5_OFFSET); |
952 | process_bit(priv, start: 0, regval); |
953 | |
954 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC6_OFFSET); |
955 | process_bit(priv, start: 5, regval); |
956 | |
957 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC7_OFFSET); |
958 | process_bit(priv, start: 10, regval); |
959 | |
960 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET); |
961 | rows.i = regval; |
962 | |
963 | priv->row_bit[15] = rows.row0; |
964 | priv->row_bit[16] = rows.row1; |
965 | priv->row_bit[17] = rows.row2; |
966 | } |
967 | |
968 | static void setup_column_address_map(struct edac_priv *priv) |
969 | { |
970 | u32 regval; |
971 | union edac_info cols; |
972 | |
973 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET); |
974 | priv->col_bit[0] = FIELD_GET(MASK_24, regval); |
975 | |
976 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC9_OFFSET); |
977 | cols.i = regval; |
978 | priv->col_bit[1] = cols.col1; |
979 | priv->col_bit[2] = cols.col2; |
980 | priv->col_bit[3] = cols.col3; |
981 | priv->col_bit[4] = cols.col4; |
982 | priv->col_bit[5] = cols.col5; |
983 | |
984 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET); |
985 | cols.i = regval; |
986 | priv->col_bit[6] = cols.col1; |
987 | priv->col_bit[7] = cols.col2; |
988 | priv->col_bit[8] = cols.col3; |
989 | priv->col_bit[9] = cols.col4; |
990 | } |
991 | |
992 | static void setup_bank_grp_ch_address_map(struct edac_priv *priv) |
993 | { |
994 | u32 regval; |
995 | |
996 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET); |
997 | priv->bank_bit[0] = FIELD_GET(MASK_24, regval); |
998 | |
999 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC11_OFFSET); |
1000 | priv->bank_bit[1] = (regval & MASK_0); |
1001 | priv->grp_bit[0] = FIELD_GET(GRP_0_MASK, regval); |
1002 | priv->grp_bit[1] = FIELD_GET(GRP_1_MASK, regval); |
1003 | priv->ch_bit = FIELD_GET(CH_0_MASK, regval); |
1004 | } |
1005 | |
1006 | static void setup_rank_lrank_address_map(struct edac_priv *priv) |
1007 | { |
1008 | u32 regval; |
1009 | |
1010 | regval = readl(addr: priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC4_OFFSET); |
1011 | priv->rank_bit[0] = (regval & MASK_0); |
1012 | priv->rank_bit[1] = FIELD_GET(RANK_1_MASK, regval); |
1013 | priv->lrank_bit[0] = FIELD_GET(LRANK_0_MASK, regval); |
1014 | priv->lrank_bit[1] = FIELD_GET(LRANK_1_MASK, regval); |
1015 | priv->lrank_bit[2] = FIELD_GET(MASK_24, regval); |
1016 | } |
1017 | |
1018 | /** |
1019 | * setup_address_map - Set Address Map by querying ADDRMAP registers. |
1020 | * @priv: DDR memory controller private instance data. |
1021 | * |
1022 | * Set Address Map by querying ADDRMAP registers. |
1023 | * |
1024 | * Return: none. |
1025 | */ |
static void setup_address_map(struct edac_priv *priv)
{
	/* Decode every ADDRMAP group: rows, columns, bank/group/channel, ranks. */
	setup_row_address_map(priv);
	setup_column_address_map(priv);
	setup_bank_grp_ch_address_map(priv);
	setup_rank_lrank_address_map(priv);
}
1036 | #endif /* CONFIG_EDAC_DEBUG */ |
1037 | |
1038 | static const struct of_device_id xlnx_edac_match[] = { |
1039 | { .compatible = "xlnx,versal-ddrmc" , }, |
1040 | { |
1041 | /* end of table */ |
1042 | } |
1043 | }; |
1044 | |
1045 | MODULE_DEVICE_TABLE(of, xlnx_edac_match); |
1046 | static u32 emif_get_id(struct device_node *node) |
1047 | { |
1048 | u32 addr, my_addr, my_id = 0; |
1049 | struct device_node *np; |
1050 | const __be32 *addrp; |
1051 | |
1052 | addrp = of_get_address(dev: node, index: 0, NULL, NULL); |
1053 | my_addr = (u32)of_translate_address(np: node, addr: addrp); |
1054 | |
1055 | for_each_matching_node(np, xlnx_edac_match) { |
1056 | if (np == node) |
1057 | continue; |
1058 | |
1059 | addrp = of_get_address(dev: np, index: 0, NULL, NULL); |
1060 | addr = (u32)of_translate_address(np, addr: addrp); |
1061 | |
1062 | edac_printk(KERN_INFO, EDAC_MC, |
1063 | "addr=%x, my_addr=%x\n" , |
1064 | addr, my_addr); |
1065 | |
1066 | if (addr < my_addr) |
1067 | my_id++; |
1068 | } |
1069 | |
1070 | return my_id; |
1071 | } |
1072 | |
1073 | static int mc_probe(struct platform_device *pdev) |
1074 | { |
1075 | void __iomem *ddrmc_baseaddr, *ddrmc_noc_baseaddr; |
1076 | struct edac_mc_layer layers[2]; |
1077 | struct mem_ctl_info *mci; |
1078 | u8 num_chans, num_csrows; |
1079 | struct edac_priv *priv; |
1080 | u32 edac_mc_id, regval; |
1081 | int rc; |
1082 | |
1083 | ddrmc_baseaddr = devm_platform_ioremap_resource_byname(pdev, name: "base" ); |
1084 | if (IS_ERR(ptr: ddrmc_baseaddr)) |
1085 | return PTR_ERR(ptr: ddrmc_baseaddr); |
1086 | |
1087 | ddrmc_noc_baseaddr = devm_platform_ioremap_resource_byname(pdev, name: "noc" ); |
1088 | if (IS_ERR(ptr: ddrmc_noc_baseaddr)) |
1089 | return PTR_ERR(ptr: ddrmc_noc_baseaddr); |
1090 | |
1091 | if (!get_ecc_state(base: ddrmc_baseaddr)) |
1092 | return -ENXIO; |
1093 | |
1094 | /* Allocate ID number for the EMIF controller */ |
1095 | edac_mc_id = emif_get_id(node: pdev->dev.of_node); |
1096 | |
1097 | regval = readl(addr: ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET); |
1098 | num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval); |
1099 | num_chans++; |
1100 | |
1101 | num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval); |
1102 | num_csrows *= 2; |
1103 | if (!num_csrows) |
1104 | num_csrows = 1; |
1105 | |
1106 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; |
1107 | layers[0].size = num_csrows; |
1108 | layers[0].is_virt_csrow = true; |
1109 | layers[1].type = EDAC_MC_LAYER_CHANNEL; |
1110 | layers[1].size = num_chans; |
1111 | layers[1].is_virt_csrow = false; |
1112 | |
1113 | mci = edac_mc_alloc(mc_num: edac_mc_id, ARRAY_SIZE(layers), layers, |
1114 | sz_pvt: sizeof(struct edac_priv)); |
1115 | if (!mci) { |
1116 | edac_printk(KERN_ERR, EDAC_MC, |
1117 | "Failed memory allocation for mc instance\n" ); |
1118 | return -ENOMEM; |
1119 | } |
1120 | |
1121 | priv = mci->pvt_info; |
1122 | priv->ddrmc_baseaddr = ddrmc_baseaddr; |
1123 | priv->ddrmc_noc_baseaddr = ddrmc_noc_baseaddr; |
1124 | priv->ce_cnt = 0; |
1125 | priv->ue_cnt = 0; |
1126 | priv->mc_id = edac_mc_id; |
1127 | |
1128 | mc_init(mci, pdev); |
1129 | |
1130 | rc = edac_mc_add_mc(mci); |
1131 | if (rc) { |
1132 | edac_printk(KERN_ERR, EDAC_MC, |
1133 | "Failed to register with EDAC core\n" ); |
1134 | goto free_edac_mc; |
1135 | } |
1136 | |
1137 | rc = xlnx_register_event(cb_type: PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1, |
1138 | XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR | |
1139 | XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR, |
1140 | wake: false, cb_fun: err_callback, data: mci); |
1141 | if (rc) { |
1142 | if (rc == -EACCES) |
1143 | rc = -EPROBE_DEFER; |
1144 | |
1145 | goto del_mc; |
1146 | } |
1147 | |
1148 | #ifdef CONFIG_EDAC_DEBUG |
1149 | create_debugfs_attributes(mci); |
1150 | setup_address_map(priv); |
1151 | #endif |
1152 | enable_intr(priv); |
1153 | return rc; |
1154 | |
1155 | del_mc: |
1156 | edac_mc_del_mc(dev: &pdev->dev); |
1157 | free_edac_mc: |
1158 | edac_mc_free(mci); |
1159 | |
1160 | return rc; |
1161 | } |
1162 | |
1163 | static void mc_remove(struct platform_device *pdev) |
1164 | { |
1165 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); |
1166 | struct edac_priv *priv = mci->pvt_info; |
1167 | |
1168 | disable_intr(priv); |
1169 | |
1170 | #ifdef CONFIG_EDAC_DEBUG |
1171 | debugfs_remove_recursive(dentry: priv->debugfs); |
1172 | #endif |
1173 | |
1174 | xlnx_unregister_event(cb_type: PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1, |
1175 | XPM_EVENT_ERROR_MASK_DDRMC_CR | |
1176 | XPM_EVENT_ERROR_MASK_NOC_CR | |
1177 | XPM_EVENT_ERROR_MASK_NOC_NCR | |
1178 | XPM_EVENT_ERROR_MASK_DDRMC_NCR, cb_fun: err_callback, data: mci); |
1179 | edac_mc_del_mc(dev: &pdev->dev); |
1180 | edac_mc_free(mci); |
1181 | } |
1182 | |
1183 | static struct platform_driver xilinx_ddr_edac_mc_driver = { |
1184 | .driver = { |
1185 | .name = "xilinx-ddrmc-edac" , |
1186 | .of_match_table = xlnx_edac_match, |
1187 | }, |
1188 | .probe = mc_probe, |
1189 | .remove_new = mc_remove, |
1190 | }; |
1191 | |
1192 | module_platform_driver(xilinx_ddr_edac_mc_driver); |
1193 | |
1194 | MODULE_AUTHOR("AMD Inc" ); |
1195 | MODULE_DESCRIPTION("Xilinx DDRMC ECC driver" ); |
1196 | MODULE_LICENSE("GPL" ); |
1197 | |