// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

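/*
 * Note: the register accessors below go through spinand->scratchbuf instead
 * of a stack buffer because the spi-mem interface requires DMA-able data
 * buffers (see the comment in spinand_init()).
 */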
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_read_cfg(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int target;
	int ret;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

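/*
 * Decode the ECC bits of the status register: returns the number of corrected
 * bitflips (or a conservative estimate of it) on success, -EBADMSG on an
 * uncorrectable error.
 */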
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nanddev_get_ecc_conf(nand)->strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

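/*
 * Fallback OOB layout used when the chip does not provide its own: no ECC
 * bytes are exposed, and everything but the 2-byte bad block marker is
 * reported as free.
 */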
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};

static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spinand_ondie_ecc_conf *engine_conf;

	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;

	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
	if (!engine_conf)
		return -ENOMEM;

	nand->ecc.ctx.priv = engine_conf;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	return 0;
}

static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
	kfree(nand->ecc.ctx.priv);
}

static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	bool enable = (req->mode != MTD_OPS_RAW);

	memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));

	/* Only enable or disable the engine */
	return spinand_ecc_enable(spinand, enable);
}

static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
					   struct nand_page_io_req *req)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Nothing to do when finishing a page write */
	if (req->type == NAND_PAGE_WRITE)
		return 0;

	/* Finish a page read: check the status, report errors/bitflips */
	ret = spinand_check_ecc_status(spinand, engine_conf->status);
	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;

	return ret;
}

static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
	.init_ctx = spinand_ondie_ecc_init_ctx,
	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
	.finish_io_req = spinand_ondie_ecc_finish_io_req,
};

static struct nand_ecc_engine spinand_ondie_ecc_engine = {
	.ops = &spinand_ondie_ecc_engine_ops,
};

static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;

	if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
	    engine_conf)
		engine_conf->status = status;
}

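/*
 * Program and erase operations are only accepted by the chip when the write
 * enable latch is set, so callers issue this op right before them.
 */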
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}

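/* PAGE READ: load a page from the NAND array into the chip's cache. */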
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

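/*
 * Transfer data and/or OOB bytes out of the chip's cache through a spi-mem
 * direct mapping. A dirmap access may complete in several chunks, hence the
 * loop below.
 */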
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	if (req->mode == MTD_OPS_RAW)
		rdesc = spinand->dirmaps[req->pos.plane].rdesc;
	else
		rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 *
	 * Only reset the data buffer manually, the OOB buffer is prepared by
	 * ECC engines ->prepare_io_req() callback.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nanddev_page_size(nand));

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	if (req->mode == MTD_OPS_RAW)
		wdesc = spinand->dirmaps[req->pos.plane].wdesc;
	else
		wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}

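/* PROGRAM EXECUTE: commit the content of the chip's cache to the NAND array. */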
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

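/* BLOCK ERASE: erase the block containing the given position. */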
static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}

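/*
 * Poll the status register until the chip clears its BUSY bit. Polling is
 * delegated to the spi-mem layer so that controllers with native status
 * polling can use it.
 */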
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

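/* Write the block lock register; used at init time to unlock all blocks. */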
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

	spinand_ondie_ecc_save_status(nand, status);

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 status;
	int ret;

	ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool disable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	old_stats = mtd->ecc_stats;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG)
			ecc_failed = true;
		else
			max_bitflips = max_t(unsigned int, max_bitflips, ret);

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool disable_ecc = false;
	int ret = 0;

	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
		disable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
		if (disable_ecc)
			iter.req.mode = MTD_OPS_RAW;

		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}

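/*
 * A block is considered bad when either of the first two OOB bytes (the bad
 * block marker) differs from 0xff. The read is done in raw mode so the marker
 * is not touched by the on-die ECC engine.
 */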
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}

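/*
 * Create the direct mappings used to access the page cache of a given plane.
 * A separate pair of descriptors with ECC enabled on the data phase is only
 * needed for pipelined ECC engines; otherwise the ECC descriptors simply
 * alias the plain ones. E.g. with 2 KiB pages, fls(2048) = 12, so plane 1 is
 * addressed at offset 1 << 12.
 */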
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
		spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
		spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;

		return 0;
	}

	info.op_tmpl = *spinand->op_templates.update_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc_ecc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	info.op_tmpl.data.ecc = true;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc_ecc = desc;

	return 0;
}

static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&alliancememory_spinand_manufacturer,
	&ato_spinand_manufacturer,
	&esmt_c8_spinand_manufacturer,
	&foresee_spinand_manufacturer,
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
	&xtx_spinand_manufacturer,
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -EOPNOTSUPP;
}

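/*
 * Chips implement one of three READ_ID variants: opcode only, opcode followed
 * by one address byte, or opcode followed by one dummy byte. Try them in that
 * order until a manufacturer table matches.
 */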
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

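/*
 * Return the first variant (they are expected to be ordered from the most to
 * the least efficient) that the controller can use to transfer a full
 * page + OOB, possibly split into several chunks by spi_mem_adjust_op_size().
 */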
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_init_flash(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret, i;

	ret = spinand_read_cfg(spinand);
	if (ret)
		return ret;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		return ret;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		return ret;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		return ret;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			break;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			break;
	}

	if (ret)
		spinand_manufacturer_cleanup(spinand);

	return ret;
}

static void spinand_mtd_resume(struct mtd_info *mtd)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return;

	ret = spinand_init_flash(spinand);
	if (ret)
		return;

	spinand_ecc_enable(spinand, false);
}

static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_flash(spinand);
	if (ret)
		goto err_free_bufs;

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/* SPI-NAND default ECC engine is on-die */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;

	spinand_ecc_enable(spinand, false);
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nanddev;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
	mtd->_resume = spinand_mtd_resume;

	if (nand->ecc.engine) {
		ret = mtd_ooblayout_count_freebytes(mtd);
		if (ret < 0)
			goto err_cleanup_ecc_engine;
	}

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_cleanup_ecc_engine;
	}

	return 0;

err_cleanup_ecc_engine:
	nanddev_ecc_engine_cleanup(nand);

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");