// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Macronix external hardware ECC engine for NAND devices, also
 * called DPE for Data Processing Engine.
 *
 * Copyright © 2019 Macronix
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 */

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* DPE Configuration */
#define DP_CONFIG 0x00
#define ECC_EN BIT(0)
#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
/* DPE Interrupt Status */
#define INTRPT_STS 0x04
#define TRANS_CMPLT BIT(0)
#define SDMA_MAIN BIT(1)
#define SDMA_SPARE BIT(2)
#define ECC_ERR BIT(3)
#define TO_SPARE BIT(4)
#define TO_MAIN BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN 0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN 0x0C
/* Host Controller Configuration */
#define HC_CONFIG 0x10
#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define BURST_TYP_FIXED 0
#define BURST_TYP_INCREASING BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR 0x14
/* ECC Chunk Size */
#define CHUNK_SIZE 0x20
/* Main Data Size */
#define MAIN_SIZE 0x24
/* Spare Data Size */
#define SPARE_SIZE 0x28
#define META_SZ(reg) ((reg) & GENMASK(7, 0))
#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
#define SPARE_SZ(reg) ((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT 0x30
/* SDMA Control */
#define SDMA_CTRL 0x40
#define WRITE_NAND 0
#define READ_NAND BIT(1)
#define CONT_NAND BIT(29)
#define CONT_SYSM BIT(30) /* Continue System Memory? */
#define SDMA_STRT BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR 0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR 0x48
/* DPE Version Number */
#define DP_VER 0xD0
#define DP_VER_OFFSET 16

/* Status bytes between each chunk of spare data */
#define STAT_BYTES 4
#define NO_ERR 0x00
#define MAX_CORR_ERR 0x28
#define UNCORR_ERR 0xFE
#define ERASED_CHUNK 0xFF

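/*
 * A single engine instance may serve two integration modes: "external",
 * where this driver pushes the data through the engine with its internal
 * DMA, and "pipelined" (mapping mode), where the wrapping host controller
 * streams the data through the engine on its way to or from the NAND. Both
 * nand_ecc_engine members are embedded so container_of() can recover the
 * instance from whichever engine pointer the core provides.
 */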
struct mxic_ecc_engine {
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct completion complete;
	struct nand_ecc_engine external_engine;
	struct nand_ecc_engine pipelined_engine;
	struct mutex lock;
};

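/*
 * Per-NAND-device context. The geometry fields are derived from the
 * SPARE_SIZE register: each of the ->steps ECC steps covers data_step_sz
 * main bytes plus oob_step_sz spare bytes, the latter split into meta_sz
 * metadata bytes and parity_sz ECC bytes. oobwithstat is an enlarged OOB
 * bounce buffer with room for the STAT_BYTES the engine appends after each
 * spare chunk on reads.
 */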
struct mxic_ecc_ctx {
	/* ECC machinery */
	unsigned int data_step_sz;
	unsigned int oob_step_sz;
	unsigned int parity_sz;
	unsigned int meta_sz;
	u8 *status;
	int steps;

	/* DMA boilerplate */
	struct nand_ecc_req_tweak_ctx req_ctx;
	u8 *oobwithstat;
	struct scatterlist sg[2];
	struct nand_page_io_req *req;
	unsigned int pageoffs;
};

static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, external_engine);
}

static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
}

static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
		return ext_ecc_eng_to_mxic(eng);
	else
		return pip_ecc_eng_to_mxic(eng);
}

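/*
 * OOB layout exposed to the MTD layer: the spare area is made of ->steps
 * sections of oob_step_sz bytes, each starting with meta_sz metadata bytes
 * followed by parity_sz ECC bytes. The first two bytes of the page's spare
 * area are excluded from the free area as they hold the factory bad block
 * markers.
 */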
static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (section < 0 || section >= ctx->steps)
		return -ERANGE;

	oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
	oobregion->length = ctx->parity_sz;

	return 0;
}

static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (section < 0 || section >= ctx->steps)
		return -ERANGE;

	if (!section) {
		oobregion->offset = 2;
		oobregion->length = ctx->meta_sz - 2;
	} else {
		oobregion->offset = section * ctx->oob_step_sz;
		oobregion->length = ctx->meta_sz;
	}

	return 0;
}

static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
	.ecc = mxic_ecc_ooblayout_ecc,
	.free = mxic_ecc_ooblayout_free,
};

static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
{
	u32 reg;

	reg = readl(mxic->regs + DP_CONFIG);
	reg &= ~ECC_EN;
	writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
{
	u32 reg;

	reg = readl(mxic->regs + DP_CONFIG);
	reg |= ECC_EN;
	writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
	writel(0, mxic->regs + INTRPT_SIG_EN);
}

static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
	writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}

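/*
 * The handler acknowledges whichever status bits fired; only TRANS_CMPLT
 * wakes up the waiter, as correction results are read back from the
 * per-chunk status bytes rather than from the ECC_ERR interrupt.
 */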
static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
	struct mxic_ecc_engine *mxic = dev_id;
	u32 sts;

	sts = readl(mxic->regs + INTRPT_STS);
	if (!sts)
		return IRQ_NONE;

	if (sts & TRANS_CMPLT)
		complete(&mxic->complete);

	writel(sts, mxic->regs + INTRPT_STS);

	return IRQ_HANDLED;
}

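/*
 * Common context initialization. The engine uses a fixed 1kiB step size;
 * only the strength is configurable, among four values (4, 8, 40 or 48 bits
 * per step, costing 32 or 96 spare bytes per step). The requested correction
 * (user configuration first, chip requirements otherwise) is mapped to the
 * closest achievable strength, then lowered until the spare area consumption
 * fits the actual OOB size.
 */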
static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
	static const int possible_strength[] = {4, 8, 40, 48};
	static const int spare_size[] = {32, 32, 96, 96};
	struct mxic_ecc_ctx *ctx;
	u32 spare_reg;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	nand->ecc.ctx.priv = ctx;

	/* Only large page NAND chips may use BCH */
	if (mtd->oobsize < 64) {
		pr_err("BCH cannot be used with small page NAND chips\n");
		return -EINVAL;
	}

	mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);

	/* Enable all status bits */
	writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
	       TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);

	/* Configure the correction depending on the NAND device topology */
	if (user->step_size && user->strength) {
		step_size = user->step_size;
		strength = user->strength;
	} else if (reqs->step_size && reqs->strength) {
		step_size = reqs->step_size;
		strength = reqs->strength;
	}

	if (step_size && strength) {
		steps = mtd->writesize / step_size;
		desired_correction = steps * strength;
	}

	/* Step size is fixed to 1kiB, strength may vary (4 possible values) */
	conf->step_size = SZ_1K;
	steps = mtd->writesize / conf->step_size;

	ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
	if (!ctx->status)
		return -ENOMEM;

	if (desired_correction) {
		strength = desired_correction / steps;

		for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
			if (possible_strength[idx] >= strength)
				break;

		idx = min_t(unsigned int, idx,
			    ARRAY_SIZE(possible_strength) - 1);
	} else {
		/* Missing data, maximize the correction */
		idx = ARRAY_SIZE(possible_strength) - 1;
	}

	/* Tune the selected strength until it fits in the OOB area */
	for (; idx >= 0; idx--) {
		if (spare_size[idx] * steps <= mtd->oobsize)
			break;
	}

	/* This engine cannot be used with this NAND device */
	if (idx < 0)
		return -EINVAL;

	/* Configure the engine for the desired strength */
	writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
	conf->strength = possible_strength[idx];
	spare_reg = readl(mxic->regs + SPARE_SIZE);

	ctx->steps = steps;
	ctx->data_step_sz = mtd->writesize / steps;
	ctx->oob_step_sz = mtd->oobsize / steps;
	ctx->parity_sz = PARITY_SZ(spare_reg);
	ctx->meta_sz = META_SZ(spare_reg);

	/* Ensure buffers will contain enough bytes to store the STAT_BYTES */
	ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
				       (ctx->steps * STAT_BYTES);
	ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
	if (ret)
		return ret;

	ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
				   GFP_KERNEL);
	if (!ctx->oobwithstat) {
		ret = -ENOMEM;
		goto cleanup_req_tweak;
	}

	sg_init_table(ctx->sg, 2);

	/* Configuration dump and sanity checks */
	dev_err(dev, "DPE version number: %d\n",
		readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
	dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
	dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
	dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
	dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
	dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
	dev_err(dev, "Meta size: %d\n", ctx->meta_sz);

	if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
	    SPARE_SZ(spare_reg)) {
		dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
			ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
			SPARE_SZ(spare_reg));
		ret = -EINVAL;
		goto free_oobwithstat;
	}

	if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
		dev_err(dev, "Wrong OOB configuration: %d != %d\n",
			ctx->oob_step_sz, SPARE_SZ(spare_reg));
		ret = -EINVAL;
		goto free_oobwithstat;
	}

	return 0;

free_oobwithstat:
	kfree(ctx->oobwithstat);
cleanup_req_tweak:
	nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);

	return ret;
}

static int mxic_ecc_init_ctx_external(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct device *dev = nand->ecc.engine->dev;
	int ret;

	dev_info(dev, "Macronix ECC engine in external mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	/* Trigger each step manually */
	writel(1, mxic->regs + CHUNK_CNT);
	writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
	       mxic->regs + HC_CONFIG);

	return 0;
}

static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx;
	struct device *dev;
	int ret;

	dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
	if (!dev)
		return -EINVAL;

	dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	ctx = nand_to_ecc_ctx(nand);

	/* All steps should be handled in one go directly by the internal DMA */
	writel(ctx->steps, mxic->regs + CHUNK_CNT);

	/*
	 * Interleaved ECC scheme cannot be used otherwise factory bad block
	 * markers would be lost. A packed layout is mandatory.
	 */
	writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
	       mxic->regs + HC_CONFIG);

	return 0;
}

static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

	if (ctx) {
		nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
		kfree(ctx->oobwithstat);
	}
}

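/*
 * Wait for a transfer to complete, either by sleeping on the TRANS_CMPLT
 * interrupt when an IRQ line is available, or by polling INTRPT_STS for up
 * to one second. In polling mode the status bits are acknowledged here,
 * otherwise the ISR takes care of it.
 */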
static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
{
	u32 val;
	int ret;

	if (mxic->irq) {
		reinit_completion(&mxic->complete);
		mxic_ecc_enable_int(mxic);
		ret = wait_for_completion_timeout(&mxic->complete,
						  msecs_to_jiffies(1000));
		ret = ret ? 0 : -ETIMEDOUT;
		mxic_ecc_disable_int(mxic);
	} else {
		ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
					 val & TRANS_CMPLT, 10, USEC_PER_SEC);
		writel(val, mxic->regs + INTRPT_STS);
	}

	if (ret) {
		dev_err(mxic->dev, "Timeout on data xfer completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
				 unsigned int direction)
{
	unsigned int dir = (direction == NAND_PAGE_READ) ?
			   READ_NAND : WRITE_NAND;
	int ret;

	mxic_ecc_enable_engine(mxic);

	/* Trigger processing */
	writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);

	/* Wait for completion */
	ret = mxic_ecc_data_xfer_wait_for_completion(mxic);

	mxic_ecc_disable_engine(mxic);

	return ret;
}

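/**
 * mxic_ecc_process_data_pipelined() - Run a correction cycle in pipelined mode
 * @eng: pipelined ECC engine obtained through mxic_ecc_get_pipelined_engine()
 * @direction: NAND_PAGE_READ or NAND_PAGE_WRITE
 * @dirmap: when non-zero, AXI slave address the engine should target
 *
 * Exported for the host controller driver wrapping the engine, which is
 * expected to call this between the prepare and finish I/O request hooks.
 */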
int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
				    unsigned int direction, dma_addr_t dirmap)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	if (dirmap)
		writel(dirmap, mxic->regs + HC_SLV_ADDR);

	return mxic_ecc_process_data(mxic, direction);
}
EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);

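/*
 * On reads, the engine appends STAT_BYTES of status information after each
 * chunk of spare data, so the bounce buffer is laid out as:
 *
 *	| spare0 | stat0 | spare1 | stat1 | ... | spareN | statN |
 *
 * Only the first byte of each status group carries the chunk verdict:
 * NO_ERR, a corrected bitflip count, UNCORR_ERR or ERASED_CHUNK.
 */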
static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
{
	u8 *buf = ctx->oobwithstat;
	int next_stat_pos;
	int step;

	/* Extract the ECC status */
	for (step = 0; step < ctx->steps; step++) {
		next_stat_pos = ctx->oob_step_sz +
				((STAT_BYTES + ctx->oob_step_sz) * step);

		ctx->status[step] = buf[next_stat_pos];
	}
}

static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
					u8 *dst, const u8 *src)
{
	int step;

	/* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
	for (step = 0; step < ctx->steps; step++)
		memcpy(dst + (step * ctx->oob_step_sz),
		       src + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       ctx->oob_step_sz);
}

static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
					u8 *dst, const u8 *src)
{
	int step;

	/* Add some space in the OOB buffer for the status bytes */
	for (step = 0; step < ctx->steps; step++)
		memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       src + (step * ctx->oob_step_sz),
		       ctx->oob_step_sz);
}

static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
				  struct nand_device *nand)
{
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct device *dev = mxic->dev;
	unsigned int max_bf = 0;
	bool failure = false;
	int step;

	for (step = 0; step < ctx->steps; step++) {
		u8 stat = ctx->status[step];

		if (stat == NO_ERR) {
			dev_dbg(dev, "ECC step %d: no error\n", step);
		} else if (stat == ERASED_CHUNK) {
			dev_dbg(dev, "ECC step %d: erased\n", step);
		} else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
			dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
			mtd->ecc_stats.failed++;
			failure = true;
		} else {
			dev_dbg(dev, "ECC step %d: %d bits corrected\n",
				step, stat);
			max_bf = max_t(unsigned int, max_bf, stat);
			mtd->ecc_stats.corrected += stat;
		}
	}

	return failure ? -EBADMSG : max_bf;
}

/* External ECC engine helpers */
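/*
 * In external mode the correction is not transparent to the host controller:
 * on writes, the page is pushed chunk by chunk through the engine DMA ahead
 * of time so the parity bytes can be placed in the OOB buffer; on reads, the
 * raw data and OOB are pushed through the engine afterwards to correct the
 * data and collect the per-chunk status.
 */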
static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int offset, nents, step, ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	nand_ecc_tweak_req(&ctx->req_ctx, req);
	ctx->req = req;

	if (req->type == NAND_PAGE_READ)
		return 0;

	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
				    ctx->req->oobbuf.out);

	sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));

	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	for (step = 0; step < ctx->steps; step++) {
		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
		       mxic->regs + SDMA_MAIN_ADDR);
		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       mxic->regs + SDMA_SPARE_ADDR);
		ret = mxic_ecc_process_data(mxic, ctx->req->type);
		if (ret)
			break;
	}

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (ret)
		return ret;

	/* Retrieve the calculated ECC bytes */
	for (step = 0; step < ctx->steps; step++) {
		offset = ctx->meta_sz + (step * ctx->oob_step_sz);
		mtd_ooblayout_get_eccbytes(mtd,
					   (u8 *)ctx->req->oobbuf.out + offset,
					   ctx->oobwithstat + (step * STAT_BYTES),
					   step * ctx->parity_sz,
					   ctx->parity_sz);
	}

	return 0;
}

static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int nents, step, ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	if (req->type == NAND_PAGE_WRITE) {
		nand_ecc_restore_req(&ctx->req_ctx, req);
		return 0;
	}

	/* Copy the OOB buffer and add room for the ECC engine status bytes */
	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));
	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	for (step = 0; step < ctx->steps; step++) {
		writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
		       mxic->regs + SDMA_MAIN_ADDR);
		writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
		       mxic->regs + SDMA_SPARE_ADDR);
		ret = mxic_ecc_process_data(mxic, ctx->req->type);
		if (ret)
			break;
	}

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (ret) {
		nand_ecc_restore_req(&ctx->req_ctx, req);
		return ret;
	}

	/* Extract the status bytes and reconstruct the buffer */
	mxic_ecc_extract_status_bytes(ctx);
	mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);

	nand_ecc_restore_req(&ctx->req_ctx, req);

	return mxic_ecc_count_biterrs(mxic, nand);
}

/* Pipelined ECC engine helpers */
static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
					     struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int nents;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	nand_ecc_tweak_req(&ctx->req_ctx, req);
	ctx->req = req;

	/* Copy the OOB buffer and add room for the ECC engine status bytes */
	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));

	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
	writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);

	return 0;
}

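/*
 * Counterpart of the prepare hook above: the engine lock taken in
 * mxic_ecc_prepare_io_req_pipelined() is held across the host controller
 * transfer and only released here, once the mapped scatterlist is no longer
 * in use.
 */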
static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int ret = 0;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (req->type == NAND_PAGE_READ) {
		mxic_ecc_extract_status_bytes(ctx);
		mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
					    ctx->oobwithstat);
		ret = mxic_ecc_count_biterrs(mxic, nand);
	}

	nand_ecc_restore_req(&ctx->req_ctx, req);

	return ret;
}

static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
	.init_ctx = mxic_ecc_init_ctx_external,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_external,
	.finish_io_req = mxic_ecc_finish_io_req_external,
};

static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
	.init_ctx = mxic_ecc_init_ctx_pipelined,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
	.finish_io_req = mxic_ecc_finish_io_req_pipelined,
};

struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
	return &mxic_ecc_engine_pipelined_ops;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);

static struct platform_device *
mxic_ecc_get_pdev(struct platform_device *spi_pdev)
{
	struct platform_device *eng_pdev;
	struct device_node *np;

	/* Retrieve the nand-ecc-engine phandle */
	np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
	if (!np)
		return NULL;

	/* Jump to the engine's device node */
	eng_pdev = of_find_device_by_node(np);
	of_node_put(np);

	return eng_pdev;
}

void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	platform_device_put(to_platform_device(mxic->dev));
}
EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);

struct nand_ecc_engine *
mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
{
	struct platform_device *eng_pdev;
	struct mxic_ecc_engine *mxic;

	eng_pdev = mxic_ecc_get_pdev(spi_pdev);
	if (!eng_pdev)
		return ERR_PTR(-ENODEV);

	mxic = platform_get_drvdata(eng_pdev);
	if (!mxic) {
		platform_device_put(eng_pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return &mxic->pipelined_engine;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
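
/*
 * Typical use of the pipelined API by a wrapping host controller driver.
 * This is an illustrative sketch only: the "host" structure and its fields
 * are hypothetical, while the mxic_ecc_*() calls are the real exported API.
 *
 *	host->ecc_eng = mxic_ecc_get_pipelined_engine(pdev);
 *	if (IS_ERR(host->ecc_eng))
 *		return PTR_ERR(host->ecc_eng);
 *	host->ecc_eng->dev = &pdev->dev;
 *	host->ecc_eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
 *	host->ecc_eng->ops = mxic_ecc_get_pipelined_ops();
 *	nand_ecc_register_on_host_hw_engine(host->ecc_eng);
 *
 * In the I/O path, between the ->prepare_io_req() and ->finish_io_req()
 * hooks:
 *
 *	ret = mxic_ecc_process_data_pipelined(host->ecc_eng, NAND_PAGE_READ,
 *					      dirmap_dma_addr);
 *
 * And on teardown:
 *
 *	mxic_ecc_put_pipelined_engine(host->ecc_eng);
 */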

/*
 * Only the external ECC engine is exported: the pipelined one is SoC
 * specific, so it is registered directly by the host controller drivers
 * that wrap it.
 */
static int mxic_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mxic_ecc_engine *mxic;
	int ret;

	mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
	if (!mxic)
		return -ENOMEM;

	mxic->dev = &pdev->dev;

	/*
	 * Both memory regions for the ECC engine itself and the AXI slave
	 * address are mandatory.
	 */
	mxic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mxic->regs)) {
		dev_err(&pdev->dev, "Missing memory region\n");
		return PTR_ERR(mxic->regs);
	}

	mxic_ecc_disable_engine(mxic);
	mxic_ecc_disable_int(mxic);

	/* IRQ is optional yet much more efficient */
	mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
	if (mxic->irq > 0) {
		ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
				       "mxic-ecc", mxic);
		if (ret)
			return ret;
	} else {
		dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
		mxic->irq = 0;
	}

	mutex_init(&mxic->lock);

	/*
	 * In external mode, the device is the ECC engine. In pipelined mode,
	 * the device is the host controller. The device is used to match the
	 * right ECC engine based on the DT properties.
	 */
	mxic->external_engine.dev = &pdev->dev;
	mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
	mxic->external_engine.ops = &mxic_ecc_engine_external_ops;

	nand_ecc_register_on_host_hw_engine(&mxic->external_engine);

	platform_set_drvdata(pdev, mxic);

	return 0;
}

static void mxic_ecc_remove(struct platform_device *pdev)
{
	struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);

	nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
}

static const struct of_device_id mxic_ecc_of_ids[] = {
	{
		.compatible = "mxicy,nand-ecc-engine-rev3",
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);

static struct platform_driver mxic_ecc_driver = {
	.driver = {
		.name = "mxic-nand-ecc-engine",
		.of_match_table = mxic_ecc_of_ids,
	},
	.probe = mxic_ecc_probe,
	.remove_new = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");