1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 */
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <linux/math64.h>
15#include <linux/module.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/spi-nor.h>
18#include <linux/mutex.h>
19#include <linux/of_platform.h>
20#include <linux/sched/task_stack.h>
21#include <linux/sizes.h>
22#include <linux/slab.h>
23#include <linux/spi/flash.h>
24
25#include "core.h"
26
27/* Define max times to check status register before we give up. */
28
29/*
30 * For everything but full-chip erase; probably could be much smaller, but kept
31 * around for safety for now
32 */
33#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
34
35/*
36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
37 * for larger flash
38 */
39#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
40
41#define SPI_NOR_MAX_ADDR_NBYTES 4
42
43#define SPI_NOR_SRST_SLEEP_MIN 200
44#define SPI_NOR_SRST_SLEEP_MAX 400
45
46/**
47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
48 * extension type.
49 * @nor: pointer to a 'struct spi_nor'
50 * @op: pointer to the 'struct spi_mem_op' whose properties
51 * need to be initialized.
52 *
53 * Right now, only "repeat" and "invert" are supported.
54 *
55 * Return: The opcode extension.
56 */
57static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
58 const struct spi_mem_op *op)
59{
60 switch (nor->cmd_ext_type) {
61 case SPI_NOR_EXT_INVERT:
62 return ~op->cmd.opcode;
63
64 case SPI_NOR_EXT_REPEAT:
65 return op->cmd.opcode;
66
67 default:
68 dev_err(nor->dev, "Unknown command extension type\n");
69 return 0;
70 }
71}
72
73/**
74 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
75 * @nor: pointer to a 'struct spi_nor'
76 * @op: pointer to the 'struct spi_mem_op' whose properties
77 * need to be initialized.
78 * @proto: the protocol from which the properties need to be set.
79 */
80void spi_nor_spimem_setup_op(const struct spi_nor *nor,
81 struct spi_mem_op *op,
82 const enum spi_nor_protocol proto)
83{
84 u8 ext;
85
86 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
87
88 if (op->addr.nbytes)
89 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
90
91 if (op->dummy.nbytes)
92 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
93
94 if (op->data.nbytes)
95 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
96
97 if (spi_nor_protocol_is_dtr(proto)) {
98 /*
99 * SPIMEM supports mixed DTR modes, but right now we can only
100 * have all phases either DTR or STR. IOW, SPIMEM can have
101 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
102 * phases to either DTR or STR.
103 */
104 op->cmd.dtr = true;
105 op->addr.dtr = true;
106 op->dummy.dtr = true;
107 op->data.dtr = true;
108
109 /* 2 bytes per clock cycle in DTR mode. */
110 op->dummy.nbytes *= 2;
111
112 ext = spi_nor_get_cmd_ext(nor, op);
113 op->cmd.opcode = (op->cmd.opcode << 8) | ext;
114 op->cmd.nbytes = 2;
115 }
116}
117
118/**
119 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
120 * transfer
121 * @nor: pointer to 'struct spi_nor'
122 * @op: pointer to 'struct spi_mem_op' template for transfer
123 *
124 * If we have to use the bounce buffer, the data field in @op will be updated.
125 *
126 * Return: true if the bounce buffer is needed, false if not
127 */
128static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
129{
130 /* op->data.buf.in occupies the same memory as op->data.buf.out */
131 if (object_is_on_stack(obj: op->data.buf.in) ||
132 !virt_addr_valid(op->data.buf.in)) {
133 if (op->data.nbytes > nor->bouncebuf_size)
134 op->data.nbytes = nor->bouncebuf_size;
135 op->data.buf.in = nor->bouncebuf;
136 return true;
137 }
138
139 return false;
140}
141
142/**
143 * spi_nor_spimem_exec_op() - execute a memory operation
144 * @nor: pointer to 'struct spi_nor'
145 * @op: pointer to 'struct spi_mem_op' template for transfer
146 *
147 * Return: 0 on success, -error otherwise.
148 */
149static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
150{
151 int error;
152
153 error = spi_mem_adjust_op_size(mem: nor->spimem, op);
154 if (error)
155 return error;
156
157 return spi_mem_exec_op(mem: nor->spimem, op);
158}
159
160int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
161 u8 *buf, size_t len)
162{
163 if (spi_nor_protocol_is_dtr(proto: nor->reg_proto))
164 return -EOPNOTSUPP;
165
166 return nor->controller_ops->read_reg(nor, opcode, buf, len);
167}
168
169int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
170 const u8 *buf, size_t len)
171{
172 if (spi_nor_protocol_is_dtr(proto: nor->reg_proto))
173 return -EOPNOTSUPP;
174
175 return nor->controller_ops->write_reg(nor, opcode, buf, len);
176}
177
178static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
179{
180 if (spi_nor_protocol_is_dtr(proto: nor->reg_proto))
181 return -EOPNOTSUPP;
182
183 return nor->controller_ops->erase(nor, offs);
184}
185
186/**
187 * spi_nor_spimem_read_data() - read data from flash's memory region via
188 * spi-mem
189 * @nor: pointer to 'struct spi_nor'
190 * @from: offset to read from
191 * @len: number of bytes to read
192 * @buf: pointer to dst buffer
193 *
194 * Return: number of bytes read successfully, -errno otherwise
195 */
196static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
197 size_t len, u8 *buf)
198{
199 struct spi_mem_op op =
200 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
201 SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
202 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
203 SPI_MEM_OP_DATA_IN(len, buf, 0));
204 bool usebouncebuf;
205 ssize_t nbytes;
206 int error;
207
208 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->read_proto);
209
210 /* convert the dummy cycles to the number of bytes */
211 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
212 if (spi_nor_protocol_is_dtr(proto: nor->read_proto))
213 op.dummy.nbytes *= 2;
214
215 usebouncebuf = spi_nor_spimem_bounce(nor, op: &op);
216
217 if (nor->dirmap.rdesc) {
218 nbytes = spi_mem_dirmap_read(desc: nor->dirmap.rdesc, offs: op.addr.val,
219 len: op.data.nbytes, buf: op.data.buf.in);
220 } else {
221 error = spi_nor_spimem_exec_op(nor, op: &op);
222 if (error)
223 return error;
224 nbytes = op.data.nbytes;
225 }
226
227 if (usebouncebuf && nbytes > 0)
228 memcpy(buf, op.data.buf.in, nbytes);
229
230 return nbytes;
231}
232
233/**
234 * spi_nor_read_data() - read data from flash memory
235 * @nor: pointer to 'struct spi_nor'
236 * @from: offset to read from
237 * @len: number of bytes to read
238 * @buf: pointer to dst buffer
239 *
240 * Return: number of bytes read successfully, -errno otherwise
241 */
242ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
243{
244 if (nor->spimem)
245 return spi_nor_spimem_read_data(nor, from, len, buf);
246
247 return nor->controller_ops->read(nor, from, len, buf);
248}
249
250/**
251 * spi_nor_spimem_write_data() - write data to flash memory via
252 * spi-mem
253 * @nor: pointer to 'struct spi_nor'
254 * @to: offset to write to
255 * @len: number of bytes to write
256 * @buf: pointer to src buffer
257 *
258 * Return: number of bytes written successfully, -errno otherwise
259 */
260static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
261 size_t len, const u8 *buf)
262{
263 struct spi_mem_op op =
264 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
265 SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
266 SPI_MEM_OP_NO_DUMMY,
267 SPI_MEM_OP_DATA_OUT(len, buf, 0));
268 ssize_t nbytes;
269 int error;
270
271 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
272 op.addr.nbytes = 0;
273
274 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->write_proto);
275
276 if (spi_nor_spimem_bounce(nor, op: &op))
277 memcpy(nor->bouncebuf, buf, op.data.nbytes);
278
279 if (nor->dirmap.wdesc) {
280 nbytes = spi_mem_dirmap_write(desc: nor->dirmap.wdesc, offs: op.addr.val,
281 len: op.data.nbytes, buf: op.data.buf.out);
282 } else {
283 error = spi_nor_spimem_exec_op(nor, op: &op);
284 if (error)
285 return error;
286 nbytes = op.data.nbytes;
287 }
288
289 return nbytes;
290}
291
292/**
293 * spi_nor_write_data() - write data to flash memory
294 * @nor: pointer to 'struct spi_nor'
295 * @to: offset to write to
296 * @len: number of bytes to write
297 * @buf: pointer to src buffer
298 *
299 * Return: number of bytes written successfully, -errno otherwise
300 */
301ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
302 const u8 *buf)
303{
304 if (nor->spimem)
305 return spi_nor_spimem_write_data(nor, to, len, buf);
306
307 return nor->controller_ops->write(nor, to, len, buf);
308}
309
310/**
311 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
312 * volatile.
313 * @nor: pointer to 'struct spi_nor'.
314 * @op: SPI memory operation. op->data.buf must be DMA-able.
315 * @proto: SPI protocol to use for the register operation.
316 *
317 * Return: zero on success, -errno otherwise
318 */
319int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
320 enum spi_nor_protocol proto)
321{
322 if (!nor->spimem)
323 return -EOPNOTSUPP;
324
325 spi_nor_spimem_setup_op(nor, op, proto);
326 return spi_nor_spimem_exec_op(nor, op);
327}
328
329/**
330 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
331 * memory.
332 * @nor: pointer to 'struct spi_nor'
333 * @op: SPI memory operation. op->data.buf must be DMA-able.
334 * @proto: SPI protocol to use for the register operation.
335 *
336 * Writing volatile registers are instant according to some manufacturers
337 * (Cypress, Micron) and do not need any status polling.
338 *
339 * Return: zero on success, -errno otherwise
340 */
341int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
342 enum spi_nor_protocol proto)
343{
344 int ret;
345
346 if (!nor->spimem)
347 return -EOPNOTSUPP;
348
349 ret = spi_nor_write_enable(nor);
350 if (ret)
351 return ret;
352 spi_nor_spimem_setup_op(nor, op, proto);
353 return spi_nor_spimem_exec_op(nor, op);
354}
355
356/**
357 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
358 * @nor: pointer to 'struct spi_nor'.
359 *
360 * Return: 0 on success, -errno otherwise.
361 */
362int spi_nor_write_enable(struct spi_nor *nor)
363{
364 int ret;
365
366 if (nor->spimem) {
367 struct spi_mem_op op = SPI_NOR_WREN_OP;
368
369 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
370
371 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
372 } else {
373 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
374 NULL, len: 0);
375 }
376
377 if (ret)
378 dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
379
380 return ret;
381}
382
383/**
384 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
385 * @nor: pointer to 'struct spi_nor'.
386 *
387 * Return: 0 on success, -errno otherwise.
388 */
389int spi_nor_write_disable(struct spi_nor *nor)
390{
391 int ret;
392
393 if (nor->spimem) {
394 struct spi_mem_op op = SPI_NOR_WRDI_OP;
395
396 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
397
398 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
399 } else {
400 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
401 NULL, len: 0);
402 }
403
404 if (ret)
405 dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
406
407 return ret;
408}
409
410/**
411 * spi_nor_read_id() - Read the JEDEC ID.
412 * @nor: pointer to 'struct spi_nor'.
413 * @naddr: number of address bytes to send. Can be zero if the operation
414 * does not need to send an address.
415 * @ndummy: number of dummy bytes to send after an opcode or address. Can
416 * be zero if the operation does not require dummy bytes.
417 * @id: pointer to a DMA-able buffer where the value of the JEDEC ID
418 * will be written.
419 * @proto: the SPI protocol for register operation.
420 *
421 * Return: 0 on success, -errno otherwise.
422 */
423int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
424 enum spi_nor_protocol proto)
425{
426 int ret;
427
428 if (nor->spimem) {
429 struct spi_mem_op op =
430 SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
431
432 spi_nor_spimem_setup_op(nor, op: &op, proto);
433 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
434 } else {
435 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
436 SPI_NOR_MAX_ID_LEN);
437 }
438 return ret;
439}
440
441/**
442 * spi_nor_read_sr() - Read the Status Register.
443 * @nor: pointer to 'struct spi_nor'.
444 * @sr: pointer to a DMA-able buffer where the value of the
445 * Status Register will be written. Should be at least 2 bytes.
446 *
447 * Return: 0 on success, -errno otherwise.
448 */
449int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
450{
451 int ret;
452
453 if (nor->spimem) {
454 struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
455
456 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
457 op.addr.nbytes = nor->params->rdsr_addr_nbytes;
458 op.dummy.nbytes = nor->params->rdsr_dummy;
459 /*
460 * We don't want to read only one byte in DTR mode. So,
461 * read 2 and then discard the second byte.
462 */
463 op.data.nbytes = 2;
464 }
465
466 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
467
468 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
469 } else {
470 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, buf: sr,
471 len: 1);
472 }
473
474 if (ret)
475 dev_dbg(nor->dev, "error %d reading SR\n", ret);
476
477 return ret;
478}
479
480/**
481 * spi_nor_read_cr() - Read the Configuration Register using the
482 * SPINOR_OP_RDCR (35h) command.
483 * @nor: pointer to 'struct spi_nor'
484 * @cr: pointer to a DMA-able buffer where the value of the
485 * Configuration Register will be written.
486 *
487 * Return: 0 on success, -errno otherwise.
488 */
489int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
490{
491 int ret;
492
493 if (nor->spimem) {
494 struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
495
496 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
497
498 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
499 } else {
500 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, buf: cr,
501 len: 1);
502 }
503
504 if (ret)
505 dev_dbg(nor->dev, "error %d reading CR\n", ret);
506
507 return ret;
508}
509
510/**
511 * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
512 * using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
513 * Winbond and Macronix.
514 * @nor: pointer to 'struct spi_nor'.
515 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
516 * address mode.
517 *
518 * Return: 0 on success, -errno otherwise.
519 */
520int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
521{
522 int ret;
523
524 if (nor->spimem) {
525 struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
526
527 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
528
529 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
530 } else {
531 ret = spi_nor_controller_ops_write_reg(nor,
532 opcode: enable ? SPINOR_OP_EN4B :
533 SPINOR_OP_EX4B,
534 NULL, len: 0);
535 }
536
537 if (ret)
538 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
539
540 return ret;
541}
542
543/**
544 * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
545 * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
546 * by ST and Micron flashes.
547 * @nor: pointer to 'struct spi_nor'.
548 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
549 * address mode.
550 *
551 * Return: 0 on success, -errno otherwise.
552 */
553int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
554{
555 int ret;
556
557 ret = spi_nor_write_enable(nor);
558 if (ret)
559 return ret;
560
561 ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
562 if (ret)
563 return ret;
564
565 return spi_nor_write_disable(nor);
566}
567
568/**
569 * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
570 * SPINOR_OP_BRWR. Typically used by Spansion flashes.
571 * @nor: pointer to 'struct spi_nor'.
572 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
573 * address mode.
574 *
575 * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
576 * used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte
577 * address mode is active and A[30:24] bits are don’t care. Write instruction is
578 * SPINOR_OP_BRWR(17h) with 1 byte of data.
579 *
580 * Return: 0 on success, -errno otherwise.
581 */
582int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
583{
584 int ret;
585
586 nor->bouncebuf[0] = enable << 7;
587
588 if (nor->spimem) {
589 struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
590
591 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
592
593 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
594 } else {
595 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
596 buf: nor->bouncebuf, len: 1);
597 }
598
599 if (ret)
600 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
601
602 return ret;
603}
604
605/**
606 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
607 * for new commands.
608 * @nor: pointer to 'struct spi_nor'.
609 *
610 * Return: 1 if ready, 0 if not ready, -errno on errors.
611 */
612int spi_nor_sr_ready(struct spi_nor *nor)
613{
614 int ret;
615
616 ret = spi_nor_read_sr(nor, sr: nor->bouncebuf);
617 if (ret)
618 return ret;
619
620 return !(nor->bouncebuf[0] & SR_WIP);
621}
622
623/**
624 * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
625 * @nor: pointer to 'struct spi_nor'.
626 *
627 * Return: true if parallel locking is enabled, false otherwise.
628 */
629static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
630{
631 return nor->flags & SNOR_F_RWW;
632}
633
634/* Locking helpers for status read operations */
635static int spi_nor_rww_start_rdst(struct spi_nor *nor)
636{
637 struct spi_nor_rww *rww = &nor->rww;
638 int ret = -EAGAIN;
639
640 mutex_lock(&nor->lock);
641
642 if (rww->ongoing_io || rww->ongoing_rd)
643 goto busy;
644
645 rww->ongoing_io = true;
646 rww->ongoing_rd = true;
647 ret = 0;
648
649busy:
650 mutex_unlock(lock: &nor->lock);
651 return ret;
652}
653
654static void spi_nor_rww_end_rdst(struct spi_nor *nor)
655{
656 struct spi_nor_rww *rww = &nor->rww;
657
658 mutex_lock(&nor->lock);
659
660 rww->ongoing_io = false;
661 rww->ongoing_rd = false;
662
663 mutex_unlock(lock: &nor->lock);
664}
665
/**
 * spi_nor_lock_rdst() - Take the status-read lock when RWW locking is in use.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -EAGAIN if the flash is busy.
 */
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	/* Non-RWW flashes are serialized elsewhere; nothing to take here. */
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return spi_nor_rww_start_rdst(nor);
}
673
674static void spi_nor_unlock_rdst(struct spi_nor *nor)
675{
676 if (spi_nor_use_parallel_locking(nor)) {
677 spi_nor_rww_end_rdst(nor);
678 wake_up(&nor->rww.wait);
679 }
680}
681
682/**
683 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
684 * @nor: pointer to 'struct spi_nor'.
685 *
686 * Return: 1 if ready, 0 if not ready, -errno on errors.
687 */
688static int spi_nor_ready(struct spi_nor *nor)
689{
690 int ret;
691
692 ret = spi_nor_lock_rdst(nor);
693 if (ret)
694 return 0;
695
696 /* Flashes might override the standard routine. */
697 if (nor->params->ready)
698 ret = nor->params->ready(nor);
699 else
700 ret = spi_nor_sr_ready(nor);
701
702 spi_nor_unlock_rdst(nor);
703
704 return ret;
705}
706
707/**
708 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
709 * Status Register until ready, or timeout occurs.
710 * @nor: pointer to "struct spi_nor".
711 * @timeout_jiffies: jiffies to wait until timeout.
712 *
713 * Return: 0 on success, -errno otherwise.
714 */
715static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
716 unsigned long timeout_jiffies)
717{
718 unsigned long deadline;
719 int timeout = 0, ret;
720
721 deadline = jiffies + timeout_jiffies;
722
723 while (!timeout) {
724 if (time_after_eq(jiffies, deadline))
725 timeout = 1;
726
727 ret = spi_nor_ready(nor);
728 if (ret < 0)
729 return ret;
730 if (ret)
731 return 0;
732
733 cond_resched();
734 }
735
736 dev_dbg(nor->dev, "flash operation timed out\n");
737
738 return -ETIMEDOUT;
739}
740
741/**
742 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
743 * flash to be ready, or timeout occurs.
744 * @nor: pointer to "struct spi_nor".
745 *
746 * Return: 0 on success, -errno otherwise.
747 */
748int spi_nor_wait_till_ready(struct spi_nor *nor)
749{
750 return spi_nor_wait_till_ready_with_timeout(nor,
751 DEFAULT_READY_WAIT_JIFFIES);
752}
753
754/**
755 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
756 * @nor: pointer to 'struct spi_nor'.
757 *
758 * Return: 0 on success, -errno otherwise.
759 */
760int spi_nor_global_block_unlock(struct spi_nor *nor)
761{
762 int ret;
763
764 ret = spi_nor_write_enable(nor);
765 if (ret)
766 return ret;
767
768 if (nor->spimem) {
769 struct spi_mem_op op = SPI_NOR_GBULK_OP;
770
771 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
772
773 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
774 } else {
775 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
776 NULL, len: 0);
777 }
778
779 if (ret) {
780 dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
781 return ret;
782 }
783
784 return spi_nor_wait_till_ready(nor);
785}
786
787/**
788 * spi_nor_write_sr() - Write the Status Register.
789 * @nor: pointer to 'struct spi_nor'.
790 * @sr: pointer to DMA-able buffer to write to the Status Register.
791 * @len: number of bytes to write to the Status Register.
792 *
793 * Return: 0 on success, -errno otherwise.
794 */
795int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
796{
797 int ret;
798
799 ret = spi_nor_write_enable(nor);
800 if (ret)
801 return ret;
802
803 if (nor->spimem) {
804 struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
805
806 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
807
808 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
809 } else {
810 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, buf: sr,
811 len);
812 }
813
814 if (ret) {
815 dev_dbg(nor->dev, "error %d writing SR\n", ret);
816 return ret;
817 }
818
819 return spi_nor_wait_till_ready(nor);
820}
821
822/**
823 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
824 * ensure that the byte written match the received value.
825 * @nor: pointer to a 'struct spi_nor'.
826 * @sr1: byte value to be written to the Status Register.
827 *
828 * Return: 0 on success, -errno otherwise.
829 */
830static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
831{
832 int ret;
833
834 nor->bouncebuf[0] = sr1;
835
836 ret = spi_nor_write_sr(nor, sr: nor->bouncebuf, len: 1);
837 if (ret)
838 return ret;
839
840 ret = spi_nor_read_sr(nor, sr: nor->bouncebuf);
841 if (ret)
842 return ret;
843
844 if (nor->bouncebuf[0] != sr1) {
845 dev_dbg(nor->dev, "SR1: read back test failed\n");
846 return -EIO;
847 }
848
849 return 0;
850}
851
852/**
853 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
854 * Status Register 2 in one shot. Ensure that the byte written in the Status
855 * Register 1 match the received value, and that the 16-bit Write did not
856 * affect what was already in the Status Register 2.
857 * @nor: pointer to a 'struct spi_nor'.
858 * @sr1: byte value to be written to the Status Register 1.
859 *
860 * Return: 0 on success, -errno otherwise.
861 */
862static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
863{
864 int ret;
865 u8 *sr_cr = nor->bouncebuf;
866 u8 cr_written;
867
868 /* Make sure we don't overwrite the contents of Status Register 2. */
869 if (!(nor->flags & SNOR_F_NO_READ_CR)) {
870 ret = spi_nor_read_cr(nor, cr: &sr_cr[1]);
871 if (ret)
872 return ret;
873 } else if (spi_nor_get_protocol_width(proto: nor->read_proto) == 4 &&
874 spi_nor_get_protocol_width(proto: nor->write_proto) == 4 &&
875 nor->params->quad_enable) {
876 /*
877 * If the Status Register 2 Read command (35h) is not
878 * supported, we should at least be sure we don't
879 * change the value of the SR2 Quad Enable bit.
880 *
881 * When the Quad Enable method is set and the buswidth is 4, we
882 * can safely assume that the value of the QE bit is one, as a
883 * consequence of the nor->params->quad_enable() call.
884 *
885 * According to the JESD216 revB standard, BFPT DWORDS[15],
886 * bits 22:20, the 16-bit Write Status (01h) command is
887 * available just for the cases in which the QE bit is
888 * described in SR2 at BIT(1).
889 */
890 sr_cr[1] = SR2_QUAD_EN_BIT1;
891 } else {
892 sr_cr[1] = 0;
893 }
894
895 sr_cr[0] = sr1;
896
897 ret = spi_nor_write_sr(nor, sr: sr_cr, len: 2);
898 if (ret)
899 return ret;
900
901 ret = spi_nor_read_sr(nor, sr: sr_cr);
902 if (ret)
903 return ret;
904
905 if (sr1 != sr_cr[0]) {
906 dev_dbg(nor->dev, "SR: Read back test failed\n");
907 return -EIO;
908 }
909
910 if (nor->flags & SNOR_F_NO_READ_CR)
911 return 0;
912
913 cr_written = sr_cr[1];
914
915 ret = spi_nor_read_cr(nor, cr: &sr_cr[1]);
916 if (ret)
917 return ret;
918
919 if (cr_written != sr_cr[1]) {
920 dev_dbg(nor->dev, "CR: read back test failed\n");
921 return -EIO;
922 }
923
924 return 0;
925}
926
927/**
928 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
929 * Configuration Register in one shot. Ensure that the byte written in the
930 * Configuration Register match the received value, and that the 16-bit Write
931 * did not affect what was already in the Status Register 1.
932 * @nor: pointer to a 'struct spi_nor'.
933 * @cr: byte value to be written to the Configuration Register.
934 *
935 * Return: 0 on success, -errno otherwise.
936 */
937int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
938{
939 int ret;
940 u8 *sr_cr = nor->bouncebuf;
941 u8 sr_written;
942
943 /* Keep the current value of the Status Register 1. */
944 ret = spi_nor_read_sr(nor, sr: sr_cr);
945 if (ret)
946 return ret;
947
948 sr_cr[1] = cr;
949
950 ret = spi_nor_write_sr(nor, sr: sr_cr, len: 2);
951 if (ret)
952 return ret;
953
954 sr_written = sr_cr[0];
955
956 ret = spi_nor_read_sr(nor, sr: sr_cr);
957 if (ret)
958 return ret;
959
960 if (sr_written != sr_cr[0]) {
961 dev_dbg(nor->dev, "SR: Read back test failed\n");
962 return -EIO;
963 }
964
965 if (nor->flags & SNOR_F_NO_READ_CR)
966 return 0;
967
968 ret = spi_nor_read_cr(nor, cr: &sr_cr[1]);
969 if (ret)
970 return ret;
971
972 if (cr != sr_cr[1]) {
973 dev_dbg(nor->dev, "CR: read back test failed\n");
974 return -EIO;
975 }
976
977 return 0;
978}
979
980/**
981 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
982 * the byte written match the received value without affecting other bits in the
983 * Status Register 1 and 2.
984 * @nor: pointer to a 'struct spi_nor'.
985 * @sr1: byte value to be written to the Status Register.
986 *
987 * Return: 0 on success, -errno otherwise.
988 */
989int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
990{
991 if (nor->flags & SNOR_F_HAS_16BIT_SR)
992 return spi_nor_write_16bit_sr_and_check(nor, sr1);
993
994 return spi_nor_write_sr1_and_check(nor, sr1);
995}
996
997/**
998 * spi_nor_write_sr2() - Write the Status Register 2 using the
999 * SPINOR_OP_WRSR2 (3eh) command.
1000 * @nor: pointer to 'struct spi_nor'.
1001 * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
1002 *
1003 * Return: 0 on success, -errno otherwise.
1004 */
1005static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
1006{
1007 int ret;
1008
1009 ret = spi_nor_write_enable(nor);
1010 if (ret)
1011 return ret;
1012
1013 if (nor->spimem) {
1014 struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
1015
1016 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
1017
1018 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
1019 } else {
1020 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
1021 buf: sr2, len: 1);
1022 }
1023
1024 if (ret) {
1025 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
1026 return ret;
1027 }
1028
1029 return spi_nor_wait_till_ready(nor);
1030}
1031
1032/**
1033 * spi_nor_read_sr2() - Read the Status Register 2 using the
1034 * SPINOR_OP_RDSR2 (3fh) command.
1035 * @nor: pointer to 'struct spi_nor'.
1036 * @sr2: pointer to DMA-able buffer where the value of the
1037 * Status Register 2 will be written.
1038 *
1039 * Return: 0 on success, -errno otherwise.
1040 */
1041static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1042{
1043 int ret;
1044
1045 if (nor->spimem) {
1046 struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
1047
1048 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
1049
1050 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
1051 } else {
1052 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, buf: sr2,
1053 len: 1);
1054 }
1055
1056 if (ret)
1057 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
1058
1059 return ret;
1060}
1061
1062/**
1063 * spi_nor_erase_chip() - Erase the entire flash memory.
1064 * @nor: pointer to 'struct spi_nor'.
1065 *
1066 * Return: 0 on success, -errno otherwise.
1067 */
1068static int spi_nor_erase_chip(struct spi_nor *nor)
1069{
1070 int ret;
1071
1072 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
1073
1074 if (nor->spimem) {
1075 struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
1076
1077 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
1078
1079 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
1080 } else {
1081 ret = spi_nor_controller_ops_write_reg(nor,
1082 SPINOR_OP_CHIP_ERASE,
1083 NULL, len: 0);
1084 }
1085
1086 if (ret)
1087 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
1088
1089 return ret;
1090}
1091
1092static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1093{
1094 size_t i;
1095
1096 for (i = 0; i < size; i++)
1097 if (table[i][0] == opcode)
1098 return table[i][1];
1099
1100 /* No conversion found, keep input op code. */
1101 return opcode;
1102}
1103
1104u8 spi_nor_convert_3to4_read(u8 opcode)
1105{
1106 static const u8 spi_nor_3to4_read[][2] = {
1107 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
1108 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
1109 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
1110 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
1111 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
1112 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
1113 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
1114 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
1115
1116 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
1117 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
1118 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
1119 };
1120
1121 return spi_nor_convert_opcode(opcode, table: spi_nor_3to4_read,
1122 ARRAY_SIZE(spi_nor_3to4_read));
1123}
1124
1125static u8 spi_nor_convert_3to4_program(u8 opcode)
1126{
1127 static const u8 spi_nor_3to4_program[][2] = {
1128 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
1129 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
1130 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
1131 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
1132 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
1133 };
1134
1135 return spi_nor_convert_opcode(opcode, table: spi_nor_3to4_program,
1136 ARRAY_SIZE(spi_nor_3to4_program));
1137}
1138
1139static u8 spi_nor_convert_3to4_erase(u8 opcode)
1140{
1141 static const u8 spi_nor_3to4_erase[][2] = {
1142 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
1143 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
1144 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
1145 };
1146
1147 return spi_nor_convert_opcode(opcode, table: spi_nor_3to4_erase,
1148 ARRAY_SIZE(spi_nor_3to4_erase));
1149}
1150
1151static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1152{
1153 return !!nor->params->erase_map.uniform_erase_type;
1154}
1155
1156static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1157{
1158 nor->read_opcode = spi_nor_convert_3to4_read(opcode: nor->read_opcode);
1159 nor->program_opcode = spi_nor_convert_3to4_program(opcode: nor->program_opcode);
1160 nor->erase_opcode = spi_nor_convert_3to4_erase(opcode: nor->erase_opcode);
1161
1162 if (!spi_nor_has_uniform_erase(nor)) {
1163 struct spi_nor_erase_map *map = &nor->params->erase_map;
1164 struct spi_nor_erase_type *erase;
1165 int i;
1166
1167 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1168 erase = &map->erase_type[i];
1169 erase->opcode =
1170 spi_nor_convert_3to4_erase(opcode: erase->opcode);
1171 }
1172 }
1173}
1174
1175static int spi_nor_prep(struct spi_nor *nor)
1176{
1177 int ret = 0;
1178
1179 if (nor->controller_ops && nor->controller_ops->prepare)
1180 ret = nor->controller_ops->prepare(nor);
1181
1182 return ret;
1183}
1184
1185static void spi_nor_unprep(struct spi_nor *nor)
1186{
1187 if (nor->controller_ops && nor->controller_ops->unprepare)
1188 nor->controller_ops->unprepare(nor);
1189}
1190
1191static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
1192 u8 *first, u8 *last)
1193{
1194 /* This is currently safe, the number of banks being very small */
1195 *first = DIV_ROUND_DOWN_ULL(start, bank_size);
1196 *last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
1197}
1198
1199/* Generic helpers for internal locking and serialization */
1200static bool spi_nor_rww_start_io(struct spi_nor *nor)
1201{
1202 struct spi_nor_rww *rww = &nor->rww;
1203 bool start = false;
1204
1205 mutex_lock(&nor->lock);
1206
1207 if (rww->ongoing_io)
1208 goto busy;
1209
1210 rww->ongoing_io = true;
1211 start = true;
1212
1213busy:
1214 mutex_unlock(lock: &nor->lock);
1215 return start;
1216}
1217
1218static void spi_nor_rww_end_io(struct spi_nor *nor)
1219{
1220 mutex_lock(&nor->lock);
1221 nor->rww.ongoing_io = false;
1222 mutex_unlock(lock: &nor->lock);
1223}
1224
1225static int spi_nor_lock_device(struct spi_nor *nor)
1226{
1227 if (!spi_nor_use_parallel_locking(nor))
1228 return 0;
1229
1230 return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
1231}
1232
1233static void spi_nor_unlock_device(struct spi_nor *nor)
1234{
1235 if (spi_nor_use_parallel_locking(nor)) {
1236 spi_nor_rww_end_io(nor);
1237 wake_up(&nor->rww.wait);
1238 }
1239}
1240
1241/* Generic helpers for internal locking and serialization */
1242static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
1243{
1244 struct spi_nor_rww *rww = &nor->rww;
1245 bool start = false;
1246
1247 mutex_lock(&nor->lock);
1248
1249 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
1250 goto busy;
1251
1252 rww->ongoing_io = true;
1253 rww->ongoing_rd = true;
1254 rww->ongoing_pe = true;
1255 start = true;
1256
1257busy:
1258 mutex_unlock(lock: &nor->lock);
1259 return start;
1260}
1261
1262static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
1263{
1264 struct spi_nor_rww *rww = &nor->rww;
1265
1266 mutex_lock(&nor->lock);
1267 rww->ongoing_io = false;
1268 rww->ongoing_rd = false;
1269 rww->ongoing_pe = false;
1270 mutex_unlock(lock: &nor->lock);
1271}
1272
1273int spi_nor_prep_and_lock(struct spi_nor *nor)
1274{
1275 int ret;
1276
1277 ret = spi_nor_prep(nor);
1278 if (ret)
1279 return ret;
1280
1281 if (!spi_nor_use_parallel_locking(nor))
1282 mutex_lock(&nor->lock);
1283 else
1284 ret = wait_event_killable(nor->rww.wait,
1285 spi_nor_rww_start_exclusive(nor));
1286
1287 return ret;
1288}
1289
1290void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1291{
1292 if (!spi_nor_use_parallel_locking(nor)) {
1293 mutex_unlock(lock: &nor->lock);
1294 } else {
1295 spi_nor_rww_end_exclusive(nor);
1296 wake_up(&nor->rww.wait);
1297 }
1298
1299 spi_nor_unprep(nor);
1300}
1301
1302/* Internal locking helpers for program and erase operations */
1303static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
1304{
1305 struct spi_nor_rww *rww = &nor->rww;
1306 unsigned int used_banks = 0;
1307 bool started = false;
1308 u8 first, last;
1309 int bank;
1310
1311 mutex_lock(&nor->lock);
1312
1313 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
1314 goto busy;
1315
1316 spi_nor_offset_to_banks(bank_size: nor->params->bank_size, start, len, first: &first, last: &last);
1317 for (bank = first; bank <= last; bank++) {
1318 if (rww->used_banks & BIT(bank))
1319 goto busy;
1320
1321 used_banks |= BIT(bank);
1322 }
1323
1324 rww->used_banks |= used_banks;
1325 rww->ongoing_pe = true;
1326 started = true;
1327
1328busy:
1329 mutex_unlock(lock: &nor->lock);
1330 return started;
1331}
1332
1333static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
1334{
1335 struct spi_nor_rww *rww = &nor->rww;
1336 u8 first, last;
1337 int bank;
1338
1339 mutex_lock(&nor->lock);
1340
1341 spi_nor_offset_to_banks(bank_size: nor->params->bank_size, start, len, first: &first, last: &last);
1342 for (bank = first; bank <= last; bank++)
1343 rww->used_banks &= ~BIT(bank);
1344
1345 rww->ongoing_pe = false;
1346
1347 mutex_unlock(lock: &nor->lock);
1348}
1349
1350static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
1351{
1352 int ret;
1353
1354 ret = spi_nor_prep(nor);
1355 if (ret)
1356 return ret;
1357
1358 if (!spi_nor_use_parallel_locking(nor))
1359 mutex_lock(&nor->lock);
1360 else
1361 ret = wait_event_killable(nor->rww.wait,
1362 spi_nor_rww_start_pe(nor, start, len));
1363
1364 return ret;
1365}
1366
1367static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
1368{
1369 if (!spi_nor_use_parallel_locking(nor)) {
1370 mutex_unlock(lock: &nor->lock);
1371 } else {
1372 spi_nor_rww_end_pe(nor, start, len);
1373 wake_up(&nor->rww.wait);
1374 }
1375
1376 spi_nor_unprep(nor);
1377}
1378
1379/* Internal locking helpers for read operations */
1380static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
1381{
1382 struct spi_nor_rww *rww = &nor->rww;
1383 unsigned int used_banks = 0;
1384 bool started = false;
1385 u8 first, last;
1386 int bank;
1387
1388 mutex_lock(&nor->lock);
1389
1390 if (rww->ongoing_io || rww->ongoing_rd)
1391 goto busy;
1392
1393 spi_nor_offset_to_banks(bank_size: nor->params->bank_size, start, len, first: &first, last: &last);
1394 for (bank = first; bank <= last; bank++) {
1395 if (rww->used_banks & BIT(bank))
1396 goto busy;
1397
1398 used_banks |= BIT(bank);
1399 }
1400
1401 rww->used_banks |= used_banks;
1402 rww->ongoing_io = true;
1403 rww->ongoing_rd = true;
1404 started = true;
1405
1406busy:
1407 mutex_unlock(lock: &nor->lock);
1408 return started;
1409}
1410
1411static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
1412{
1413 struct spi_nor_rww *rww = &nor->rww;
1414 u8 first, last;
1415 int bank;
1416
1417 mutex_lock(&nor->lock);
1418
1419 spi_nor_offset_to_banks(bank_size: nor->params->bank_size, start, len, first: &first, last: &last);
1420 for (bank = first; bank <= last; bank++)
1421 nor->rww.used_banks &= ~BIT(bank);
1422
1423 rww->ongoing_io = false;
1424 rww->ongoing_rd = false;
1425
1426 mutex_unlock(lock: &nor->lock);
1427}
1428
1429static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
1430{
1431 int ret;
1432
1433 ret = spi_nor_prep(nor);
1434 if (ret)
1435 return ret;
1436
1437 if (!spi_nor_use_parallel_locking(nor))
1438 mutex_lock(&nor->lock);
1439 else
1440 ret = wait_event_killable(nor->rww.wait,
1441 spi_nor_rww_start_rd(nor, start, len));
1442
1443 return ret;
1444}
1445
1446static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
1447{
1448 if (!spi_nor_use_parallel_locking(nor)) {
1449 mutex_unlock(lock: &nor->lock);
1450 } else {
1451 spi_nor_rww_end_rd(nor, start, len);
1452 wake_up(&nor->rww.wait);
1453 }
1454
1455 spi_nor_unprep(nor);
1456}
1457
1458static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1459{
1460 if (!nor->params->convert_addr)
1461 return addr;
1462
1463 return nor->params->convert_addr(nor, addr);
1464}
1465
1466/*
1467 * Initiate the erasure of a single sector
1468 */
1469int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1470{
1471 int i;
1472
1473 addr = spi_nor_convert_addr(nor, addr);
1474
1475 if (nor->spimem) {
1476 struct spi_mem_op op =
1477 SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
1478 nor->addr_nbytes, addr);
1479
1480 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
1481
1482 return spi_mem_exec_op(mem: nor->spimem, op: &op);
1483 } else if (nor->controller_ops->erase) {
1484 return spi_nor_controller_ops_erase(nor, offs: addr);
1485 }
1486
1487 /*
1488 * Default implementation, if driver doesn't have a specialized HW
1489 * control
1490 */
1491 for (i = nor->addr_nbytes - 1; i >= 0; i--) {
1492 nor->bouncebuf[i] = addr & 0xff;
1493 addr >>= 8;
1494 }
1495
1496 return spi_nor_controller_ops_write_reg(nor, opcode: nor->erase_opcode,
1497 buf: nor->bouncebuf, len: nor->addr_nbytes);
1498}
1499
1500/**
1501 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1502 * @erase: pointer to a structure that describes a SPI NOR erase type
1503 * @dividend: dividend value
1504 * @remainder: pointer to u32 remainder (will be updated)
1505 *
1506 * Return: the result of the division
1507 */
1508static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1509 u64 dividend, u32 *remainder)
1510{
1511 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1512 *remainder = (u32)dividend & erase->size_mask;
1513 return dividend >> erase->size_shift;
1514}
1515
1516/**
1517 * spi_nor_find_best_erase_type() - find the best erase type for the given
1518 * offset in the serial flash memory and the
1519 * number of bytes to erase. The region in
1520 * which the address fits is expected to be
1521 * provided.
1522 * @map: the erase map of the SPI NOR
1523 * @region: pointer to a structure that describes a SPI NOR erase region
1524 * @addr: offset in the serial flash memory
1525 * @len: number of bytes to erase
1526 *
1527 * Return: a pointer to the best fitted erase type, NULL otherwise.
1528 */
1529static const struct spi_nor_erase_type *
1530spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1531 const struct spi_nor_erase_region *region,
1532 u64 addr, u32 len)
1533{
1534 const struct spi_nor_erase_type *erase;
1535 u32 rem;
1536 int i;
1537 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1538
1539 /*
1540 * Erase types are ordered by size, with the smallest erase type at
1541 * index 0.
1542 */
1543 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1544 /* Does the erase region support the tested erase type? */
1545 if (!(erase_mask & BIT(i)))
1546 continue;
1547
1548 erase = &map->erase_type[i];
1549 if (!erase->size)
1550 continue;
1551
1552 /* Alignment is not mandatory for overlaid regions */
1553 if (region->offset & SNOR_OVERLAID_REGION &&
1554 region->size <= len)
1555 return erase;
1556
1557 /* Don't erase more than what the user has asked for. */
1558 if (erase->size > len)
1559 continue;
1560
1561 spi_nor_div_by_erase_size(erase, dividend: addr, remainder: &rem);
1562 if (!rem)
1563 return erase;
1564 }
1565
1566 return NULL;
1567}
1568
1569static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
1570{
1571 return region->offset & SNOR_LAST_REGION;
1572}
1573
1574static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
1575{
1576 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
1577}
1578
1579/**
1580 * spi_nor_region_next() - get the next spi nor region
1581 * @region: pointer to a structure that describes a SPI NOR erase region
1582 *
1583 * Return: the next spi nor region or NULL if last region.
1584 */
1585struct spi_nor_erase_region *
1586spi_nor_region_next(struct spi_nor_erase_region *region)
1587{
1588 if (spi_nor_region_is_last(region))
1589 return NULL;
1590 region++;
1591 return region;
1592}
1593
1594/**
1595 * spi_nor_find_erase_region() - find the region of the serial flash memory in
1596 * which the offset fits
1597 * @map: the erase map of the SPI NOR
1598 * @addr: offset in the serial flash memory
1599 *
1600 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1601 * otherwise.
1602 */
1603static struct spi_nor_erase_region *
1604spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1605{
1606 struct spi_nor_erase_region *region = map->regions;
1607 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1608 u64 region_end = region_start + region->size;
1609
1610 while (addr < region_start || addr >= region_end) {
1611 region = spi_nor_region_next(region);
1612 if (!region)
1613 return ERR_PTR(error: -EINVAL);
1614
1615 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1616 region_end = region_start + region->size;
1617 }
1618
1619 return region;
1620}
1621
1622/**
1623 * spi_nor_init_erase_cmd() - initialize an erase command
1624 * @region: pointer to a structure that describes a SPI NOR erase region
1625 * @erase: pointer to a structure that describes a SPI NOR erase type
1626 *
1627 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1628 * otherwise.
1629 */
1630static struct spi_nor_erase_command *
1631spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1632 const struct spi_nor_erase_type *erase)
1633{
1634 struct spi_nor_erase_command *cmd;
1635
1636 cmd = kmalloc(size: sizeof(*cmd), GFP_KERNEL);
1637 if (!cmd)
1638 return ERR_PTR(error: -ENOMEM);
1639
1640 INIT_LIST_HEAD(list: &cmd->list);
1641 cmd->opcode = erase->opcode;
1642 cmd->count = 1;
1643
1644 if (region->offset & SNOR_OVERLAID_REGION)
1645 cmd->size = region->size;
1646 else
1647 cmd->size = erase->size;
1648
1649 return cmd;
1650}
1651
1652/**
1653 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1654 * @erase_list: list of erase commands
1655 */
1656static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1657{
1658 struct spi_nor_erase_command *cmd, *next;
1659
1660 list_for_each_entry_safe(cmd, next, erase_list, list) {
1661 list_del(entry: &cmd->list);
1662 kfree(objp: cmd);
1663 }
1664}
1665
1666/**
1667 * spi_nor_init_erase_cmd_list() - initialize erase command list
1668 * @nor: pointer to a 'struct spi_nor'
1669 * @erase_list: list of erase commands to be executed once we validate that the
1670 * erase can be performed
1671 * @addr: offset in the serial flash memory
1672 * @len: number of bytes to erase
1673 *
1674 * Builds the list of best fitted erase commands and verifies if the erase can
1675 * be performed.
1676 *
1677 * Return: 0 on success, -errno otherwise.
1678 */
1679static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1680 struct list_head *erase_list,
1681 u64 addr, u32 len)
1682{
1683 const struct spi_nor_erase_map *map = &nor->params->erase_map;
1684 const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1685 struct spi_nor_erase_region *region;
1686 struct spi_nor_erase_command *cmd = NULL;
1687 u64 region_end;
1688 int ret = -EINVAL;
1689
1690 region = spi_nor_find_erase_region(map, addr);
1691 if (IS_ERR(ptr: region))
1692 return PTR_ERR(ptr: region);
1693
1694 region_end = spi_nor_region_end(region);
1695
1696 while (len) {
1697 erase = spi_nor_find_best_erase_type(map, region, addr, len);
1698 if (!erase)
1699 goto destroy_erase_cmd_list;
1700
1701 if (prev_erase != erase ||
1702 erase->size != cmd->size ||
1703 region->offset & SNOR_OVERLAID_REGION) {
1704 cmd = spi_nor_init_erase_cmd(region, erase);
1705 if (IS_ERR(ptr: cmd)) {
1706 ret = PTR_ERR(ptr: cmd);
1707 goto destroy_erase_cmd_list;
1708 }
1709
1710 list_add_tail(new: &cmd->list, head: erase_list);
1711 } else {
1712 cmd->count++;
1713 }
1714
1715 addr += cmd->size;
1716 len -= cmd->size;
1717
1718 if (len && addr >= region_end) {
1719 region = spi_nor_region_next(region);
1720 if (!region)
1721 goto destroy_erase_cmd_list;
1722 region_end = spi_nor_region_end(region);
1723 }
1724
1725 prev_erase = erase;
1726 }
1727
1728 return 0;
1729
1730destroy_erase_cmd_list:
1731 spi_nor_destroy_erase_cmd_list(erase_list);
1732 return ret;
1733}
1734
1735/**
1736 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1737 * @nor: pointer to a 'struct spi_nor'
1738 * @addr: offset in the serial flash memory
1739 * @len: number of bytes to erase
1740 *
1741 * Build a list of best fitted erase commands and execute it once we validate
1742 * that the erase can be performed.
1743 *
1744 * Return: 0 on success, -errno otherwise.
1745 */
1746static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1747{
1748 LIST_HEAD(erase_list);
1749 struct spi_nor_erase_command *cmd, *next;
1750 int ret;
1751
1752 ret = spi_nor_init_erase_cmd_list(nor, erase_list: &erase_list, addr, len);
1753 if (ret)
1754 return ret;
1755
1756 list_for_each_entry_safe(cmd, next, &erase_list, list) {
1757 nor->erase_opcode = cmd->opcode;
1758 while (cmd->count) {
1759 dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
1760 cmd->size, cmd->opcode, cmd->count);
1761
1762 ret = spi_nor_lock_device(nor);
1763 if (ret)
1764 goto destroy_erase_cmd_list;
1765
1766 ret = spi_nor_write_enable(nor);
1767 if (ret) {
1768 spi_nor_unlock_device(nor);
1769 goto destroy_erase_cmd_list;
1770 }
1771
1772 ret = spi_nor_erase_sector(nor, addr);
1773 spi_nor_unlock_device(nor);
1774 if (ret)
1775 goto destroy_erase_cmd_list;
1776
1777 ret = spi_nor_wait_till_ready(nor);
1778 if (ret)
1779 goto destroy_erase_cmd_list;
1780
1781 addr += cmd->size;
1782 cmd->count--;
1783 }
1784 list_del(entry: &cmd->list);
1785 kfree(objp: cmd);
1786 }
1787
1788 return 0;
1789
1790destroy_erase_cmd_list:
1791 spi_nor_destroy_erase_cmd_list(erase_list: &erase_list);
1792 return ret;
1793}
1794
1795/*
1796 * Erase an address range on the nor chip. The address range may extend
1797 * one or more erase sectors. Return an error if there is a problem erasing.
1798 */
1799static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1800{
1801 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1802 u32 addr, len;
1803 uint32_t rem;
1804 int ret;
1805
1806 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1807 (long long)instr->len);
1808
1809 if (spi_nor_has_uniform_erase(nor)) {
1810 div_u64_rem(dividend: instr->len, divisor: mtd->erasesize, remainder: &rem);
1811 if (rem)
1812 return -EINVAL;
1813 }
1814
1815 addr = instr->addr;
1816 len = instr->len;
1817
1818 ret = spi_nor_prep_and_lock_pe(nor, start: instr->addr, len: instr->len);
1819 if (ret)
1820 return ret;
1821
1822 /* whole-chip erase? */
1823 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1824 unsigned long timeout;
1825
1826 ret = spi_nor_lock_device(nor);
1827 if (ret)
1828 goto erase_err;
1829
1830 ret = spi_nor_write_enable(nor);
1831 if (ret) {
1832 spi_nor_unlock_device(nor);
1833 goto erase_err;
1834 }
1835
1836 ret = spi_nor_erase_chip(nor);
1837 spi_nor_unlock_device(nor);
1838 if (ret)
1839 goto erase_err;
1840
1841 /*
1842 * Scale the timeout linearly with the size of the flash, with
1843 * a minimum calibrated to an old 2MB flash. We could try to
1844 * pull these from CFI/SFDP, but these values should be good
1845 * enough for now.
1846 */
1847 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1848 CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1849 (unsigned long)(mtd->size / SZ_2M));
1850 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout_jiffies: timeout);
1851 if (ret)
1852 goto erase_err;
1853
1854 /* REVISIT in some cases we could speed up erasing large regions
1855 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
1856 * to use "small sector erase", but that's not always optimal.
1857 */
1858
1859 /* "sector"-at-a-time erase */
1860 } else if (spi_nor_has_uniform_erase(nor)) {
1861 while (len) {
1862 ret = spi_nor_lock_device(nor);
1863 if (ret)
1864 goto erase_err;
1865
1866 ret = spi_nor_write_enable(nor);
1867 if (ret) {
1868 spi_nor_unlock_device(nor);
1869 goto erase_err;
1870 }
1871
1872 ret = spi_nor_erase_sector(nor, addr);
1873 spi_nor_unlock_device(nor);
1874 if (ret)
1875 goto erase_err;
1876
1877 ret = spi_nor_wait_till_ready(nor);
1878 if (ret)
1879 goto erase_err;
1880
1881 addr += mtd->erasesize;
1882 len -= mtd->erasesize;
1883 }
1884
1885 /* erase multiple sectors */
1886 } else {
1887 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1888 if (ret)
1889 goto erase_err;
1890 }
1891
1892 ret = spi_nor_write_disable(nor);
1893
1894erase_err:
1895 spi_nor_unlock_and_unprep_pe(nor, start: instr->addr, len: instr->len);
1896
1897 return ret;
1898}
1899
1900/**
1901 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1902 * Register 1.
1903 * @nor: pointer to a 'struct spi_nor'
1904 *
1905 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1906 *
1907 * Return: 0 on success, -errno otherwise.
1908 */
1909int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1910{
1911 int ret;
1912
1913 ret = spi_nor_read_sr(nor, sr: nor->bouncebuf);
1914 if (ret)
1915 return ret;
1916
1917 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1918 return 0;
1919
1920 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1921
1922 return spi_nor_write_sr1_and_check(nor, sr1: nor->bouncebuf[0]);
1923}
1924
1925/**
1926 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1927 * Register 2.
1928 * @nor: pointer to a 'struct spi_nor'.
1929 *
1930 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
1931 *
1932 * Return: 0 on success, -errno otherwise.
1933 */
1934int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1935{
1936 int ret;
1937
1938 if (nor->flags & SNOR_F_NO_READ_CR)
1939 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1940
1941 ret = spi_nor_read_cr(nor, cr: nor->bouncebuf);
1942 if (ret)
1943 return ret;
1944
1945 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1946 return 0;
1947
1948 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1949
1950 return spi_nor_write_16bit_cr_and_check(nor, cr: nor->bouncebuf[0]);
1951}
1952
1953/**
1954 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1955 * @nor: pointer to a 'struct spi_nor'
1956 *
1957 * Set the Quad Enable (QE) bit in the Status Register 2.
1958 *
1959 * This is one of the procedures to set the QE bit described in the SFDP
1960 * (JESD216 rev B) specification but no manufacturer using this procedure has
1961 * been identified yet, hence the name of the function.
1962 *
1963 * Return: 0 on success, -errno otherwise.
1964 */
1965int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1966{
1967 u8 *sr2 = nor->bouncebuf;
1968 int ret;
1969 u8 sr2_written;
1970
1971 /* Check current Quad Enable bit value. */
1972 ret = spi_nor_read_sr2(nor, sr2);
1973 if (ret)
1974 return ret;
1975 if (*sr2 & SR2_QUAD_EN_BIT7)
1976 return 0;
1977
1978 /* Update the Quad Enable bit. */
1979 *sr2 |= SR2_QUAD_EN_BIT7;
1980
1981 ret = spi_nor_write_sr2(nor, sr2);
1982 if (ret)
1983 return ret;
1984
1985 sr2_written = *sr2;
1986
1987 /* Read back and check it. */
1988 ret = spi_nor_read_sr2(nor, sr2);
1989 if (ret)
1990 return ret;
1991
1992 if (*sr2 != sr2_written) {
1993 dev_dbg(nor->dev, "SR2: Read back test failed\n");
1994 return -EIO;
1995 }
1996
1997 return 0;
1998}
1999
/* All flash manufacturers known to the core, scanned in order at probe time. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};
2017
/* Fallback flash_info used when the JEDEC ID is unknown (see spi_nor_detect). */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};
2021
2022static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
2023 const u8 *id)
2024{
2025 const struct flash_info *part;
2026 unsigned int i, j;
2027
2028 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2029 for (j = 0; j < manufacturers[i]->nparts; j++) {
2030 part = &manufacturers[i]->parts[j];
2031 if (part->id &&
2032 !memcmp(p: part->id->bytes, q: id, size: part->id->len)) {
2033 nor->manufacturer = manufacturers[i];
2034 return part;
2035 }
2036 }
2037 }
2038
2039 return NULL;
2040}
2041
2042static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
2043{
2044 const struct flash_info *info;
2045 u8 *id = nor->bouncebuf;
2046 int ret;
2047
2048 ret = spi_nor_read_id(nor, naddr: 0, ndummy: 0, id, proto: nor->reg_proto);
2049 if (ret) {
2050 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
2051 return ERR_PTR(error: ret);
2052 }
2053
2054 /* Cache the complete flash ID. */
2055 nor->id = devm_kmemdup(dev: nor->dev, src: id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
2056 if (!nor->id)
2057 return ERR_PTR(error: -ENOMEM);
2058
2059 info = spi_nor_match_id(nor, id);
2060
2061 /* Fallback to a generic flash described only by its SFDP data. */
2062 if (!info) {
2063 ret = spi_nor_check_sfdp_signature(nor);
2064 if (!ret)
2065 info = &spi_nor_generic_flash;
2066 }
2067
2068 if (!info) {
2069 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2070 SPI_NOR_MAX_ID_LEN, id);
2071 return ERR_PTR(error: -ENODEV);
2072 }
2073 return info;
2074}
2075
2076static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2077 size_t *retlen, u_char *buf)
2078{
2079 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2080 loff_t from_lock = from;
2081 size_t len_lock = len;
2082 ssize_t ret;
2083
2084 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2085
2086 ret = spi_nor_prep_and_lock_rd(nor, start: from_lock, len: len_lock);
2087 if (ret)
2088 return ret;
2089
2090 while (len) {
2091 loff_t addr = from;
2092
2093 addr = spi_nor_convert_addr(nor, addr);
2094
2095 ret = spi_nor_read_data(nor, from: addr, len, buf);
2096 if (ret == 0) {
2097 /* We shouldn't see 0-length reads */
2098 ret = -EIO;
2099 goto read_err;
2100 }
2101 if (ret < 0)
2102 goto read_err;
2103
2104 WARN_ON(ret > len);
2105 *retlen += ret;
2106 buf += ret;
2107 from += ret;
2108 len -= ret;
2109 }
2110 ret = 0;
2111
2112read_err:
2113 spi_nor_unlock_and_unprep_rd(nor, start: from_lock, len: len_lock);
2114
2115 return ret;
2116}
2117
2118/*
2119 * Write an address range to the nor chip. Data must be written in
2120 * FLASH_PAGESIZE chunks. The address range may be any size provided
2121 * it is within the physical boundaries.
2122 */
2123static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2124 size_t *retlen, const u_char *buf)
2125{
2126 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2127 size_t page_offset, page_remain, i;
2128 ssize_t ret;
2129 u32 page_size = nor->params->page_size;
2130
2131 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2132
2133 ret = spi_nor_prep_and_lock_pe(nor, start: to, len);
2134 if (ret)
2135 return ret;
2136
2137 for (i = 0; i < len; ) {
2138 ssize_t written;
2139 loff_t addr = to + i;
2140
2141 /*
2142 * If page_size is a power of two, the offset can be quickly
2143 * calculated with an AND operation. On the other cases we
2144 * need to do a modulus operation (more expensive).
2145 */
2146 if (is_power_of_2(n: page_size)) {
2147 page_offset = addr & (page_size - 1);
2148 } else {
2149 uint64_t aux = addr;
2150
2151 page_offset = do_div(aux, page_size);
2152 }
2153 /* the size of data remaining on the first page */
2154 page_remain = min_t(size_t, page_size - page_offset, len - i);
2155
2156 addr = spi_nor_convert_addr(nor, addr);
2157
2158 ret = spi_nor_lock_device(nor);
2159 if (ret)
2160 goto write_err;
2161
2162 ret = spi_nor_write_enable(nor);
2163 if (ret) {
2164 spi_nor_unlock_device(nor);
2165 goto write_err;
2166 }
2167
2168 ret = spi_nor_write_data(nor, to: addr, len: page_remain, buf: buf + i);
2169 spi_nor_unlock_device(nor);
2170 if (ret < 0)
2171 goto write_err;
2172 written = ret;
2173
2174 ret = spi_nor_wait_till_ready(nor);
2175 if (ret)
2176 goto write_err;
2177 *retlen += written;
2178 i += written;
2179 }
2180
2181write_err:
2182 spi_nor_unlock_and_unprep_pe(nor, start: to, len);
2183
2184 return ret;
2185}
2186
2187static int spi_nor_check(struct spi_nor *nor)
2188{
2189 if (!nor->dev ||
2190 (!nor->spimem && !nor->controller_ops) ||
2191 (!nor->spimem && nor->controller_ops &&
2192 (!nor->controller_ops->read ||
2193 !nor->controller_ops->write ||
2194 !nor->controller_ops->read_reg ||
2195 !nor->controller_ops->write_reg))) {
2196 pr_err("spi-nor: please fill all the necessary fields!\n");
2197 return -EINVAL;
2198 }
2199
2200 if (nor->spimem && nor->controller_ops) {
2201 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2202 return -EINVAL;
2203 }
2204
2205 return 0;
2206}
2207
2208void
2209spi_nor_set_read_settings(struct spi_nor_read_command *read,
2210 u8 num_mode_clocks,
2211 u8 num_wait_states,
2212 u8 opcode,
2213 enum spi_nor_protocol proto)
2214{
2215 read->num_mode_clocks = num_mode_clocks;
2216 read->num_wait_states = num_wait_states;
2217 read->opcode = opcode;
2218 read->proto = proto;
2219}
2220
2221void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2222 enum spi_nor_protocol proto)
2223{
2224 pp->opcode = opcode;
2225 pp->proto = proto;
2226}
2227
2228static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2229{
2230 size_t i;
2231
2232 for (i = 0; i < size; i++)
2233 if (table[i][0] == (int)hwcaps)
2234 return table[i][1];
2235
2236 return -EINVAL;
2237}
2238
2239int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2240{
2241 static const int hwcaps_read2cmd[][2] = {
2242 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
2243 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
2244 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
2245 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
2246 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
2247 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
2248 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
2249 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
2250 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
2251 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
2252 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
2253 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
2254 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
2255 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
2256 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
2257 { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
2258 };
2259
2260 return spi_nor_hwcaps2cmd(hwcaps, table: hwcaps_read2cmd,
2261 ARRAY_SIZE(hwcaps_read2cmd));
2262}
2263
2264int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2265{
2266 static const int hwcaps_pp2cmd[][2] = {
2267 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
2268 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
2269 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
2270 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
2271 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
2272 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
2273 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
2274 { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
2275 };
2276
2277 return spi_nor_hwcaps2cmd(hwcaps, table: hwcaps_pp2cmd,
2278 ARRAY_SIZE(hwcaps_pp2cmd));
2279}
2280
2281/**
2282 * spi_nor_spimem_check_op - check if the operation is supported
2283 * by controller
2284 *@nor: pointer to a 'struct spi_nor'
2285 *@op: pointer to op template to be checked
2286 *
2287 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2288 */
2289static int spi_nor_spimem_check_op(struct spi_nor *nor,
2290 struct spi_mem_op *op)
2291{
2292 /*
2293 * First test with 4 address bytes. The opcode itself might
2294 * be a 3B addressing opcode but we don't care, because
2295 * SPI controller implementation should not check the opcode,
2296 * but just the sequence.
2297 */
2298 op->addr.nbytes = 4;
2299 if (!spi_mem_supports_op(mem: nor->spimem, op)) {
2300 if (nor->params->size > SZ_16M)
2301 return -EOPNOTSUPP;
2302
2303 /* If flash size <= 16MB, 3 address bytes are sufficient */
2304 op->addr.nbytes = 3;
2305 if (!spi_mem_supports_op(mem: nor->spimem, op))
2306 return -EOPNOTSUPP;
2307 }
2308
2309 return 0;
2310}
2311
2312/**
2313 * spi_nor_spimem_check_readop - check if the read op is supported
2314 * by controller
2315 *@nor: pointer to a 'struct spi_nor'
2316 *@read: pointer to op template to be checked
2317 *
2318 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2319 */
2320static int spi_nor_spimem_check_readop(struct spi_nor *nor,
2321 const struct spi_nor_read_command *read)
2322{
2323 struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);
2324
2325 spi_nor_spimem_setup_op(nor, op: &op, proto: read->proto);
2326
2327 /* convert the dummy cycles to the number of bytes */
2328 op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
2329 op.dummy.buswidth / 8;
2330 if (spi_nor_protocol_is_dtr(proto: nor->read_proto))
2331 op.dummy.nbytes *= 2;
2332
2333 return spi_nor_spimem_check_op(nor, op: &op);
2334}
2335
2336/**
2337 * spi_nor_spimem_check_pp - check if the page program op is supported
2338 * by controller
2339 *@nor: pointer to a 'struct spi_nor'
2340 *@pp: pointer to op template to be checked
2341 *
2342 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2343 */
2344static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2345 const struct spi_nor_pp_command *pp)
2346{
2347 struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
2348
2349 spi_nor_spimem_setup_op(nor, op: &op, proto: pp->proto);
2350
2351 return spi_nor_spimem_check_op(nor, op: &op);
2352}
2353
2354/**
2355 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
2356 * based on SPI controller capabilities
2357 * @nor: pointer to a 'struct spi_nor'
2358 * @hwcaps: pointer to resulting capabilities after adjusting
2359 * according to controller and flash's capability
2360 */
2361static void
2362spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
2363{
2364 struct spi_nor_flash_parameter *params = nor->params;
2365 unsigned int cap;
2366
2367 /* X-X-X modes are not supported yet, mask them all. */
2368 *hwcaps &= ~SNOR_HWCAPS_X_X_X;
2369
2370 /*
2371 * If the reset line is broken, we do not want to enter a stateful
2372 * mode.
2373 */
2374 if (nor->flags & SNOR_F_BROKEN_RESET)
2375 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
2376
2377 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
2378 int rdidx, ppidx;
2379
2380 if (!(*hwcaps & BIT(cap)))
2381 continue;
2382
2383 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
2384 if (rdidx >= 0 &&
2385 spi_nor_spimem_check_readop(nor, read: &params->reads[rdidx]))
2386 *hwcaps &= ~BIT(cap);
2387
2388 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2389 if (ppidx < 0)
2390 continue;
2391
2392 if (spi_nor_spimem_check_pp(nor,
2393 pp: &params->page_programs[ppidx]))
2394 *hwcaps &= ~BIT(cap);
2395 }
2396}
2397
2398/**
2399 * spi_nor_set_erase_type() - set a SPI NOR erase type
2400 * @erase: pointer to a structure that describes a SPI NOR erase type
2401 * @size: the size of the sector/block erased by the erase type
2402 * @opcode: the SPI command op code to erase the sector/block
2403 */
2404void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2405 u8 opcode)
2406{
2407 erase->size = size;
2408 erase->opcode = opcode;
2409 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2410 erase->size_shift = ffs(erase->size) - 1;
2411 erase->size_mask = (1 << erase->size_shift) - 1;
2412}
2413
2414/**
2415 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
2416 * @erase: pointer to a structure that describes a SPI NOR erase type
2417 */
2418void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
2419{
2420 erase->size = 0;
2421}
2422
2423/**
2424 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2425 * @map: the erase map of the SPI NOR
2426 * @erase_mask: bitmask encoding erase types that can erase the entire
2427 * flash memory
2428 * @flash_size: the spi nor flash memory size
2429 */
2430void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2431 u8 erase_mask, u64 flash_size)
2432{
2433 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2434 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2435 SNOR_LAST_REGION;
2436 map->uniform_region.size = flash_size;
2437 map->regions = &map->uniform_region;
2438 map->uniform_erase_type = erase_mask;
2439}
2440
2441int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2442 const struct sfdp_parameter_header *bfpt_header,
2443 const struct sfdp_bfpt *bfpt)
2444{
2445 int ret;
2446
2447 if (nor->manufacturer && nor->manufacturer->fixups &&
2448 nor->manufacturer->fixups->post_bfpt) {
2449 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2450 bfpt);
2451 if (ret)
2452 return ret;
2453 }
2454
2455 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2456 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2457
2458 return 0;
2459}
2460
2461static int spi_nor_select_read(struct spi_nor *nor,
2462 u32 shared_hwcaps)
2463{
2464 int cmd, best_match = fls(x: shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2465 const struct spi_nor_read_command *read;
2466
2467 if (best_match < 0)
2468 return -EINVAL;
2469
2470 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2471 if (cmd < 0)
2472 return -EINVAL;
2473
2474 read = &nor->params->reads[cmd];
2475 nor->read_opcode = read->opcode;
2476 nor->read_proto = read->proto;
2477
2478 /*
2479 * In the SPI NOR framework, we don't need to make the difference
2480 * between mode clock cycles and wait state clock cycles.
2481 * Indeed, the value of the mode clock cycles is used by a QSPI
2482 * flash memory to know whether it should enter or leave its 0-4-4
2483 * (Continuous Read / XIP) mode.
2484 * eXecution In Place is out of the scope of the mtd sub-system.
2485 * Hence we choose to merge both mode and wait state clock cycles
2486 * into the so called dummy clock cycles.
2487 */
2488 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
2489 return 0;
2490}
2491
2492static int spi_nor_select_pp(struct spi_nor *nor,
2493 u32 shared_hwcaps)
2494{
2495 int cmd, best_match = fls(x: shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2496 const struct spi_nor_pp_command *pp;
2497
2498 if (best_match < 0)
2499 return -EINVAL;
2500
2501 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2502 if (cmd < 0)
2503 return -EINVAL;
2504
2505 pp = &nor->params->page_programs[cmd];
2506 nor->program_opcode = pp->opcode;
2507 nor->write_proto = pp->proto;
2508 return 0;
2509}
2510
2511/**
2512 * spi_nor_select_uniform_erase() - select optimum uniform erase type
2513 * @map: the erase map of the SPI NOR
2514 *
2515 * Once the optimum uniform sector erase command is found, disable all the
2516 * other.
2517 *
2518 * Return: pointer to erase type on success, NULL otherwise.
2519 */
2520static const struct spi_nor_erase_type *
2521spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
2522{
2523 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2524 int i;
2525 u8 uniform_erase_type = map->uniform_erase_type;
2526
2527 /*
2528 * Search for the biggest erase size, except for when compiled
2529 * to use 4k erases.
2530 */
2531 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2532 if (!(uniform_erase_type & BIT(i)))
2533 continue;
2534
2535 tested_erase = &map->erase_type[i];
2536
2537 /* Skip masked erase types. */
2538 if (!tested_erase->size)
2539 continue;
2540
2541 /*
2542 * If the current erase size is the 4k one, stop here,
2543 * we have found the right uniform Sector Erase command.
2544 */
2545 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
2546 tested_erase->size == SZ_4K) {
2547 erase = tested_erase;
2548 break;
2549 }
2550
2551 /*
2552 * Otherwise, the current erase size is still a valid candidate.
2553 * Select the biggest valid candidate.
2554 */
2555 if (!erase && tested_erase->size)
2556 erase = tested_erase;
2557 /* keep iterating to find the wanted_size */
2558 }
2559
2560 if (!erase)
2561 return NULL;
2562
2563 /* Disable all other Sector Erase commands. */
2564 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
2565 map->uniform_erase_type |= BIT(erase - map->erase_type);
2566 return erase;
2567}
2568
2569static int spi_nor_select_erase(struct spi_nor *nor)
2570{
2571 struct spi_nor_erase_map *map = &nor->params->erase_map;
2572 const struct spi_nor_erase_type *erase = NULL;
2573 struct mtd_info *mtd = &nor->mtd;
2574 int i;
2575
2576 /*
2577 * The previous implementation handling Sector Erase commands assumed
2578 * that the SPI flash memory has an uniform layout then used only one
2579 * of the supported erase sizes for all Sector Erase commands.
2580 * So to be backward compatible, the new implementation also tries to
2581 * manage the SPI flash memory as uniform with a single erase sector
2582 * size, when possible.
2583 */
2584 if (spi_nor_has_uniform_erase(nor)) {
2585 erase = spi_nor_select_uniform_erase(map);
2586 if (!erase)
2587 return -EINVAL;
2588 nor->erase_opcode = erase->opcode;
2589 mtd->erasesize = erase->size;
2590 return 0;
2591 }
2592
2593 /*
2594 * For non-uniform SPI flash memory, set mtd->erasesize to the
2595 * maximum erase sector size. No need to set nor->erase_opcode.
2596 */
2597 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2598 if (map->erase_type[i].size) {
2599 erase = &map->erase_type[i];
2600 break;
2601 }
2602 }
2603
2604 if (!erase)
2605 return -EINVAL;
2606
2607 mtd->erasesize = erase->size;
2608 return 0;
2609}
2610
2611static int spi_nor_default_setup(struct spi_nor *nor,
2612 const struct spi_nor_hwcaps *hwcaps)
2613{
2614 struct spi_nor_flash_parameter *params = nor->params;
2615 u32 ignored_mask, shared_mask;
2616 int err;
2617
2618 /*
2619 * Keep only the hardware capabilities supported by both the SPI
2620 * controller and the SPI flash memory.
2621 */
2622 shared_mask = hwcaps->mask & params->hwcaps.mask;
2623
2624 if (nor->spimem) {
2625 /*
2626 * When called from spi_nor_probe(), all caps are set and we
2627 * need to discard some of them based on what the SPI
2628 * controller actually supports (using spi_mem_supports_op()).
2629 */
2630 spi_nor_spimem_adjust_hwcaps(nor, hwcaps: &shared_mask);
2631 } else {
2632 /*
2633 * SPI n-n-n protocols are not supported when the SPI
2634 * controller directly implements the spi_nor interface.
2635 * Yet another reason to switch to spi-mem.
2636 */
2637 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
2638 if (shared_mask & ignored_mask) {
2639 dev_dbg(nor->dev,
2640 "SPI n-n-n protocols are not supported.\n");
2641 shared_mask &= ~ignored_mask;
2642 }
2643 }
2644
2645 /* Select the (Fast) Read command. */
2646 err = spi_nor_select_read(nor, shared_hwcaps: shared_mask);
2647 if (err) {
2648 dev_dbg(nor->dev,
2649 "can't select read settings supported by both the SPI controller and memory.\n");
2650 return err;
2651 }
2652
2653 /* Select the Page Program command. */
2654 err = spi_nor_select_pp(nor, shared_hwcaps: shared_mask);
2655 if (err) {
2656 dev_dbg(nor->dev,
2657 "can't select write settings supported by both the SPI controller and memory.\n");
2658 return err;
2659 }
2660
2661 /* Select the Sector Erase command. */
2662 err = spi_nor_select_erase(nor);
2663 if (err) {
2664 dev_dbg(nor->dev,
2665 "can't select erase settings supported by both the SPI controller and memory.\n");
2666 return err;
2667 }
2668
2669 return 0;
2670}
2671
2672static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
2673{
2674 if (nor->params->addr_nbytes) {
2675 nor->addr_nbytes = nor->params->addr_nbytes;
2676 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
2677 /*
2678 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
2679 * in this protocol an odd addr_nbytes cannot be used because
2680 * then the address phase would only span a cycle and a half.
2681 * Half a cycle would be left over. We would then have to start
2682 * the dummy phase in the middle of a cycle and so too the data
2683 * phase, and we will end the transaction with half a cycle left
2684 * over.
2685 *
2686 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
2687 * avoid this situation.
2688 */
2689 nor->addr_nbytes = 4;
2690 } else if (nor->info->addr_nbytes) {
2691 nor->addr_nbytes = nor->info->addr_nbytes;
2692 } else {
2693 nor->addr_nbytes = 3;
2694 }
2695
2696 if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
2697 /* enable 4-byte addressing if the device exceeds 16MiB */
2698 nor->addr_nbytes = 4;
2699 }
2700
2701 if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
2702 dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
2703 nor->addr_nbytes);
2704 return -EINVAL;
2705 }
2706
2707 /* Set 4byte opcodes when possible. */
2708 if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
2709 !(nor->flags & SNOR_F_HAS_4BAIT))
2710 spi_nor_set_4byte_opcodes(nor);
2711
2712 return 0;
2713}
2714
2715static int spi_nor_setup(struct spi_nor *nor,
2716 const struct spi_nor_hwcaps *hwcaps)
2717{
2718 int ret;
2719
2720 if (nor->params->setup)
2721 ret = nor->params->setup(nor, hwcaps);
2722 else
2723 ret = spi_nor_default_setup(nor, hwcaps);
2724 if (ret)
2725 return ret;
2726
2727 return spi_nor_set_addr_nbytes(nor);
2728}
2729
2730/**
2731 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2732 * settings based on MFR register and ->default_init() hook.
2733 * @nor: pointer to a 'struct spi_nor'.
2734 */
2735static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2736{
2737 if (nor->manufacturer && nor->manufacturer->fixups &&
2738 nor->manufacturer->fixups->default_init)
2739 nor->manufacturer->fixups->default_init(nor);
2740
2741 if (nor->info->fixups && nor->info->fixups->default_init)
2742 nor->info->fixups->default_init(nor);
2743}
2744
2745/**
2746 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
2747 * settings based on nor->info->sfdp_flags. This method should be called only by
2748 * flashes that do not define SFDP tables. If the flash supports SFDP but the
2749 * information is wrong and the settings from this function can not be retrieved
2750 * by parsing SFDP, one should instead use the fixup hooks and update the wrong
2751 * bits.
2752 * @nor: pointer to a 'struct spi_nor'.
2753 */
2754static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
2755{
2756 struct spi_nor_flash_parameter *params = nor->params;
2757 struct spi_nor_erase_map *map = &params->erase_map;
2758 const struct flash_info *info = nor->info;
2759 const u8 no_sfdp_flags = info->no_sfdp_flags;
2760 u8 i, erase_mask;
2761
2762 if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
2763 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2764 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ_1_1_2],
2765 num_mode_clocks: 0, num_wait_states: 8, SPINOR_OP_READ_1_1_2,
2766 proto: SNOR_PROTO_1_1_2);
2767 }
2768
2769 if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
2770 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2771 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ_1_1_4],
2772 num_mode_clocks: 0, num_wait_states: 8, SPINOR_OP_READ_1_1_4,
2773 proto: SNOR_PROTO_1_1_4);
2774 }
2775
2776 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
2777 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2778 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ_1_1_8],
2779 num_mode_clocks: 0, num_wait_states: 8, SPINOR_OP_READ_1_1_8,
2780 proto: SNOR_PROTO_1_1_8);
2781 }
2782
2783 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
2784 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
2785 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ_8_8_8_DTR],
2786 num_mode_clocks: 0, num_wait_states: 20, SPINOR_OP_READ_FAST,
2787 proto: SNOR_PROTO_8_8_8_DTR);
2788 }
2789
2790 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
2791 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
2792 /*
2793 * Since xSPI Page Program opcode is backward compatible with
2794 * Legacy SPI, use Legacy SPI opcode there as well.
2795 */
2796 spi_nor_set_pp_settings(pp: &params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
2797 SPINOR_OP_PP, proto: SNOR_PROTO_8_8_8_DTR);
2798 }
2799
2800 /*
2801 * Sector Erase settings. Sort Erase Types in ascending order, with the
2802 * smallest erase size starting at BIT(0).
2803 */
2804 erase_mask = 0;
2805 i = 0;
2806 if (no_sfdp_flags & SECT_4K) {
2807 erase_mask |= BIT(i);
2808 spi_nor_set_erase_type(erase: &map->erase_type[i], size: 4096u,
2809 SPINOR_OP_BE_4K);
2810 i++;
2811 }
2812 erase_mask |= BIT(i);
2813 spi_nor_set_erase_type(erase: &map->erase_type[i],
2814 size: info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
2815 SPINOR_OP_SE);
2816 spi_nor_init_uniform_erase_map(map, erase_mask, flash_size: params->size);
2817}
2818
2819/**
2820 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
2821 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
2822 * @nor: pointer to a 'struct spi_nor'
2823 */
2824static void spi_nor_init_flags(struct spi_nor *nor)
2825{
2826 struct device_node *np = spi_nor_get_flash_node(nor);
2827 const u16 flags = nor->info->flags;
2828
2829 if (of_property_read_bool(np, propname: "broken-flash-reset"))
2830 nor->flags |= SNOR_F_BROKEN_RESET;
2831
2832 if (of_property_read_bool(np, propname: "no-wp"))
2833 nor->flags |= SNOR_F_NO_WP;
2834
2835 if (flags & SPI_NOR_SWP_IS_VOLATILE)
2836 nor->flags |= SNOR_F_SWP_IS_VOLATILE;
2837
2838 if (flags & SPI_NOR_HAS_LOCK)
2839 nor->flags |= SNOR_F_HAS_LOCK;
2840
2841 if (flags & SPI_NOR_HAS_TB) {
2842 nor->flags |= SNOR_F_HAS_SR_TB;
2843 if (flags & SPI_NOR_TB_SR_BIT6)
2844 nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
2845 }
2846
2847 if (flags & SPI_NOR_4BIT_BP) {
2848 nor->flags |= SNOR_F_HAS_4BIT_BP;
2849 if (flags & SPI_NOR_BP3_SR_BIT6)
2850 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
2851 }
2852
2853 if (flags & NO_CHIP_ERASE)
2854 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2855
2856 if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
2857 !nor->controller_ops)
2858 nor->flags |= SNOR_F_RWW;
2859}
2860
2861/**
2862 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
2863 * be discovered by SFDP for this particular flash because the SFDP table that
2864 * indicates this support is not defined in the flash. In case the table for
2865 * this support is defined but has wrong values, one should instead use a
2866 * post_sfdp() hook to set the SNOR_F equivalent flag.
2867 * @nor: pointer to a 'struct spi_nor'
2868 */
2869static void spi_nor_init_fixup_flags(struct spi_nor *nor)
2870{
2871 const u8 fixup_flags = nor->info->fixup_flags;
2872
2873 if (fixup_flags & SPI_NOR_4B_OPCODES)
2874 nor->flags |= SNOR_F_4B_OPCODES;
2875
2876 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
2877 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
2878}
2879
2880/**
2881 * spi_nor_late_init_params() - Late initialization of default flash parameters.
2882 * @nor: pointer to a 'struct spi_nor'
2883 *
2884 * Used to initialize flash parameters that are not declared in the JESD216
2885 * SFDP standard, or where SFDP tables are not defined at all.
2886 * Will replace the spi_nor_manufacturer_init_params() method.
2887 */
2888static int spi_nor_late_init_params(struct spi_nor *nor)
2889{
2890 struct spi_nor_flash_parameter *params = nor->params;
2891 int ret;
2892
2893 if (nor->manufacturer && nor->manufacturer->fixups &&
2894 nor->manufacturer->fixups->late_init) {
2895 ret = nor->manufacturer->fixups->late_init(nor);
2896 if (ret)
2897 return ret;
2898 }
2899
2900 if (nor->info->fixups && nor->info->fixups->late_init) {
2901 ret = nor->info->fixups->late_init(nor);
2902 if (ret)
2903 return ret;
2904 }
2905
2906 /* Default method kept for backward compatibility. */
2907 if (!params->set_4byte_addr_mode)
2908 params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
2909
2910 spi_nor_init_flags(nor);
2911 spi_nor_init_fixup_flags(nor);
2912
2913 /*
2914 * NOR protection support. When locking_ops are not provided, we pick
2915 * the default ones.
2916 */
2917 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2918 spi_nor_init_default_locking_ops(nor);
2919
2920 if (params->n_banks > 1)
2921 params->bank_size = div64_u64(dividend: params->size, divisor: params->n_banks);
2922
2923 return 0;
2924}
2925
2926/**
2927 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
2928 * parameters and settings based on JESD216 SFDP standard.
2929 * @nor: pointer to a 'struct spi_nor'.
2930 *
2931 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2932 * legacy flash parameters and settings will be restored.
2933 */
2934static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
2935{
2936 struct spi_nor_flash_parameter sfdp_params;
2937
2938 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2939
2940 if (spi_nor_parse_sfdp(nor)) {
2941 memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2942 nor->flags &= ~SNOR_F_4B_OPCODES;
2943 }
2944}
2945
2946/**
2947 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
2948 * parameters and settings.
2949 * @nor: pointer to a 'struct spi_nor'.
2950 *
2951 * The method assumes that flash doesn't support SFDP so it initializes flash
2952 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
2953 * when parsing SFDP, if supported.
2954 */
2955static void spi_nor_init_params_deprecated(struct spi_nor *nor)
2956{
2957 spi_nor_no_sfdp_init_params(nor);
2958
2959 spi_nor_manufacturer_init_params(nor);
2960
2961 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
2962 SPI_NOR_QUAD_READ |
2963 SPI_NOR_OCTAL_READ |
2964 SPI_NOR_OCTAL_DTR_READ))
2965 spi_nor_sfdp_init_params_deprecated(nor);
2966}
2967
2968/**
2969 * spi_nor_init_default_params() - Default initialization of flash parameters
2970 * and settings. Done for all flashes, regardless is they define SFDP tables
2971 * or not.
2972 * @nor: pointer to a 'struct spi_nor'.
2973 */
2974static void spi_nor_init_default_params(struct spi_nor *nor)
2975{
2976 struct spi_nor_flash_parameter *params = nor->params;
2977 const struct flash_info *info = nor->info;
2978 struct device_node *np = spi_nor_get_flash_node(nor);
2979
2980 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
2981 params->otp.org = info->otp;
2982
2983 /* Default to 16-bit Write Status (01h) Command */
2984 nor->flags |= SNOR_F_HAS_16BIT_SR;
2985
2986 /* Set SPI NOR sizes. */
2987 params->writesize = 1;
2988 params->size = info->size;
2989 params->bank_size = params->size;
2990 params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
2991 params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
2992
2993 if (!(info->flags & SPI_NOR_NO_FR)) {
2994 /* Default to Fast Read for DT and non-DT platform devices. */
2995 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2996
2997 /* Mask out Fast Read if not requested at DT instantiation. */
2998 if (np && !of_property_read_bool(np, propname: "m25p,fast-read"))
2999 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
3000 }
3001
3002 /* (Fast) Read settings. */
3003 params->hwcaps.mask |= SNOR_HWCAPS_READ;
3004 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ],
3005 num_mode_clocks: 0, num_wait_states: 0, SPINOR_OP_READ,
3006 proto: SNOR_PROTO_1_1_1);
3007
3008 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
3009 spi_nor_set_read_settings(read: &params->reads[SNOR_CMD_READ_FAST],
3010 num_mode_clocks: 0, num_wait_states: 8, SPINOR_OP_READ_FAST,
3011 proto: SNOR_PROTO_1_1_1);
3012 /* Page Program settings. */
3013 params->hwcaps.mask |= SNOR_HWCAPS_PP;
3014 spi_nor_set_pp_settings(pp: &params->page_programs[SNOR_CMD_PP],
3015 SPINOR_OP_PP, proto: SNOR_PROTO_1_1_1);
3016
3017 if (info->flags & SPI_NOR_QUAD_PP) {
3018 params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
3019 spi_nor_set_pp_settings(pp: &params->page_programs[SNOR_CMD_PP_1_1_4],
3020 SPINOR_OP_PP_1_1_4, proto: SNOR_PROTO_1_1_4);
3021 }
3022}
3023
3024/**
3025 * spi_nor_init_params() - Initialize the flash's parameters and settings.
3026 * @nor: pointer to a 'struct spi_nor'.
3027 *
3028 * The flash parameters and settings are initialized based on a sequence of
3029 * calls that are ordered by priority:
3030 *
3031 * 1/ Default flash parameters initialization. The initializations are done
3032 * based on nor->info data:
3033 * spi_nor_info_init_params()
3034 *
3035 * which can be overwritten by:
3036 * 2/ Manufacturer flash parameters initialization. The initializations are
3037 * done based on MFR register, or when the decisions can not be done solely
3038 * based on MFR, by using specific flash_info tweeks, ->default_init():
3039 * spi_nor_manufacturer_init_params()
3040 *
3041 * which can be overwritten by:
3042 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
3043 * should be more accurate that the above.
3044 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
3045 *
3046 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
3047 * the flash parameters and settings immediately after parsing the Basic
3048 * Flash Parameter Table.
3049 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
3050 * It is used to tweak various flash parameters when information provided
3051 * by the SFDP tables are wrong.
3052 *
3053 * which can be overwritten by:
3054 * 4/ Late flash parameters initialization, used to initialize flash
3055 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
3056 * tables are not defined at all.
3057 * spi_nor_late_init_params()
3058 *
3059 * Return: 0 on success, -errno otherwise.
3060 */
3061static int spi_nor_init_params(struct spi_nor *nor)
3062{
3063 int ret;
3064
3065 nor->params = devm_kzalloc(dev: nor->dev, size: sizeof(*nor->params), GFP_KERNEL);
3066 if (!nor->params)
3067 return -ENOMEM;
3068
3069 spi_nor_init_default_params(nor);
3070
3071 if (spi_nor_needs_sfdp(nor)) {
3072 ret = spi_nor_parse_sfdp(nor);
3073 if (ret) {
3074 dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
3075 return ret;
3076 }
3077 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
3078 spi_nor_no_sfdp_init_params(nor);
3079 } else {
3080 spi_nor_init_params_deprecated(nor);
3081 }
3082
3083 return spi_nor_late_init_params(nor);
3084}
3085
/**
 * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
 * @nor:		pointer to a 'struct spi_nor'
 * @enable:             whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
	int ret;

	/* Nothing to do if the flash provides no switch method. */
	if (!nor->params->set_octal_dtr)
		return 0;

	/* Only switch modes when both read and write use 8D-8D-8D. */
	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	/* Only volatile I/O mode configurations are toggled here. */
	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->set_octal_dtr(nor, enable);
	if (ret)
		return ret;

	/* Track the protocol now used for register accesses. */
	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}
3117
3118/**
3119 * spi_nor_quad_enable() - enable Quad I/O if needed.
3120 * @nor: pointer to a 'struct spi_nor'
3121 *
3122 * Return: 0 on success, -errno otherwise.
3123 */
3124static int spi_nor_quad_enable(struct spi_nor *nor)
3125{
3126 if (!nor->params->quad_enable)
3127 return 0;
3128
3129 if (!(spi_nor_get_protocol_width(proto: nor->read_proto) == 4 ||
3130 spi_nor_get_protocol_width(proto: nor->write_proto) == 4))
3131 return 0;
3132
3133 return nor->params->quad_enable(nor);
3134}
3135
3136/**
3137 * spi_nor_set_4byte_addr_mode() - Set address mode.
3138 * @nor: pointer to a 'struct spi_nor'.
3139 * @enable: enable/disable 4 byte address mode.
3140 *
3141 * Return: 0 on success, -errno otherwise.
3142 */
3143int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
3144{
3145 struct spi_nor_flash_parameter *params = nor->params;
3146 int ret;
3147
3148 ret = params->set_4byte_addr_mode(nor, enable);
3149 if (ret && ret != -ENOTSUPP)
3150 return ret;
3151
3152 if (enable) {
3153 params->addr_nbytes = 4;
3154 params->addr_mode_nbytes = 4;
3155 } else {
3156 params->addr_nbytes = 3;
3157 params->addr_mode_nbytes = 3;
3158 }
3159
3160 return 0;
3161}
3162
3163static int spi_nor_init(struct spi_nor *nor)
3164{
3165 int err;
3166
3167 err = spi_nor_set_octal_dtr(nor, enable: true);
3168 if (err) {
3169 dev_dbg(nor->dev, "octal mode not supported\n");
3170 return err;
3171 }
3172
3173 err = spi_nor_quad_enable(nor);
3174 if (err) {
3175 dev_dbg(nor->dev, "quad mode not supported\n");
3176 return err;
3177 }
3178
3179 /*
3180 * Some SPI NOR flashes are write protected by default after a power-on
3181 * reset cycle, in order to avoid inadvertent writes during power-up.
3182 * Backward compatibility imposes to unlock the entire flash memory
3183 * array at power-up by default. Depending on the kernel configuration
3184 * (1) do nothing, (2) always unlock the entire flash array or (3)
3185 * unlock the entire flash array only when the software write
3186 * protection bits are volatile. The latter is indicated by
3187 * SNOR_F_SWP_IS_VOLATILE.
3188 */
3189 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
3190 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
3191 nor->flags & SNOR_F_SWP_IS_VOLATILE))
3192 spi_nor_try_unlock_all(nor);
3193
3194 if (nor->addr_nbytes == 4 &&
3195 nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
3196 !(nor->flags & SNOR_F_4B_OPCODES)) {
3197 /*
3198 * If the RESET# pin isn't hooked up properly, or the system
3199 * otherwise doesn't perform a reset command in the boot
3200 * sequence, it's impossible to 100% protect against unexpected
3201 * reboots (e.g., crashes). Warn the user (or hopefully, system
3202 * designer) that this is bad.
3203 */
3204 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
3205 "enabling reset hack; may not recover from unexpected reboots\n");
3206 err = spi_nor_set_4byte_addr_mode(nor, enable: true);
3207 if (err)
3208 return err;
3209 }
3210
3211 return 0;
3212}
3213
3214/**
3215 * spi_nor_soft_reset() - Perform a software reset
3216 * @nor: pointer to 'struct spi_nor'
3217 *
3218 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
3219 * the device to its power-on-reset state. This is useful when the software has
3220 * made some changes to device (volatile) registers and needs to reset it before
3221 * shutting down, for example.
3222 *
3223 * Not every flash supports this sequence. The same set of opcodes might be used
3224 * for some other operation on a flash that does not support this. Support for
3225 * this sequence can be discovered via SFDP in the BFPT table.
3226 *
3227 * Return: 0 on success, -errno otherwise.
3228 */
3229static void spi_nor_soft_reset(struct spi_nor *nor)
3230{
3231 struct spi_mem_op op;
3232 int ret;
3233
3234 op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
3235
3236 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
3237
3238 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
3239 if (ret) {
3240 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3241 return;
3242 }
3243
3244 op = (struct spi_mem_op)SPINOR_SRST_OP;
3245
3246 spi_nor_spimem_setup_op(nor, op: &op, proto: nor->reg_proto);
3247
3248 ret = spi_mem_exec_op(mem: nor->spimem, op: &op);
3249 if (ret) {
3250 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3251 return;
3252 }
3253
3254 /*
3255 * Software Reset is not instant, and the delay varies from flash to
3256 * flash. Looking at a few flashes, most range somewhere below 100
3257 * microseconds. So, sleep for a range of 200-400 us.
3258 */
3259 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
3260}
3261
3262/* mtd suspend handler */
3263static int spi_nor_suspend(struct mtd_info *mtd)
3264{
3265 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3266 int ret;
3267
3268 /* Disable octal DTR mode if we enabled it. */
3269 ret = spi_nor_set_octal_dtr(nor, enable: false);
3270 if (ret)
3271 dev_err(nor->dev, "suspend() failed\n");
3272
3273 return ret;
3274}
3275
3276/* mtd resume handler */
3277static void spi_nor_resume(struct mtd_info *mtd)
3278{
3279 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3280 struct device *dev = nor->dev;
3281 int ret;
3282
3283 /* re-initialize the nor chip */
3284 ret = spi_nor_init(nor);
3285 if (ret)
3286 dev_err(dev, "resume() failed\n");
3287}
3288
3289static int spi_nor_get_device(struct mtd_info *mtd)
3290{
3291 struct mtd_info *master = mtd_get_master(mtd);
3292 struct spi_nor *nor = mtd_to_spi_nor(mtd: master);
3293 struct device *dev;
3294
3295 if (nor->spimem)
3296 dev = nor->spimem->spi->controller->dev.parent;
3297 else
3298 dev = nor->dev;
3299
3300 if (!try_module_get(module: dev->driver->owner))
3301 return -ENODEV;
3302
3303 return 0;
3304}
3305
3306static void spi_nor_put_device(struct mtd_info *mtd)
3307{
3308 struct mtd_info *master = mtd_get_master(mtd);
3309 struct spi_nor *nor = mtd_to_spi_nor(mtd: master);
3310 struct device *dev;
3311
3312 if (nor->spimem)
3313 dev = nor->spimem->spi->controller->dev.parent;
3314 else
3315 dev = nor->dev;
3316
3317 module_put(module: dev->driver->owner);
3318}
3319
3320static void spi_nor_restore(struct spi_nor *nor)
3321{
3322 int ret;
3323
3324 /* restore the addressing mode */
3325 if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
3326 nor->flags & SNOR_F_BROKEN_RESET) {
3327 ret = spi_nor_set_4byte_addr_mode(nor, enable: false);
3328 if (ret)
3329 /*
3330 * Do not stop the execution in the hope that the flash
3331 * will default to the 3-byte address mode after the
3332 * software reset.
3333 */
3334 dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
3335 }
3336
3337 if (nor->flags & SNOR_F_SOFT_RESET)
3338 spi_nor_soft_reset(nor);
3339}
3340
3341static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
3342 const char *name)
3343{
3344 unsigned int i, j;
3345
3346 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3347 for (j = 0; j < manufacturers[i]->nparts; j++) {
3348 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
3349 nor->manufacturer = manufacturers[i];
3350 return &manufacturers[i]->parts[j];
3351 }
3352 }
3353 }
3354
3355 return NULL;
3356}
3357
3358static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
3359 const char *name)
3360{
3361 const struct flash_info *info = NULL;
3362
3363 if (name)
3364 info = spi_nor_match_name(nor, name);
3365 /* Try to auto-detect if chip name wasn't specified or not found */
3366 if (!info)
3367 return spi_nor_detect(nor);
3368
3369 /*
3370 * If caller has specified name of flash model that can normally be
3371 * detected using JEDEC, let's verify it.
3372 */
3373 if (name && info->id) {
3374 const struct flash_info *jinfo;
3375
3376 jinfo = spi_nor_detect(nor);
3377 if (IS_ERR(ptr: jinfo)) {
3378 return jinfo;
3379 } else if (jinfo != info) {
3380 /*
3381 * JEDEC knows better, so overwrite platform ID. We
3382 * can't trust partitions any longer, but we'll let
3383 * mtd apply them anyway, since some partitions may be
3384 * marked read-only, and we don't want to loose that
3385 * information, even if it's not 100% accurate.
3386 */
3387 dev_warn(nor->dev, "found %s, expected %s\n",
3388 jinfo->name, info->name);
3389 info = jinfo;
3390 }
3391 }
3392
3393 return info;
3394}
3395
/*
 * Populate the mtd_info embedded in @nor: flags, geometry taken from
 * nor->params, and the mtd operation callbacks implemented by this file.
 * Must run only after the flash parameters are fully initialized.
 */
static void spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	/* Locking and OTP hooks are optional; the helpers decide per-flash. */
	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	/* Fall back to the device name if no name was provided earlier. */
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;
}
3428
3429static int spi_nor_hw_reset(struct spi_nor *nor)
3430{
3431 struct gpio_desc *reset;
3432
3433 reset = devm_gpiod_get_optional(dev: nor->dev, con_id: "reset", flags: GPIOD_OUT_LOW);
3434 if (IS_ERR_OR_NULL(ptr: reset))
3435 return PTR_ERR_OR_ZERO(ptr: reset);
3436
3437 /*
3438 * Experimental delay values by looking at different flash device
3439 * vendors datasheets.
3440 */
3441 usleep_range(min: 1, max: 5);
3442 gpiod_set_value_cansleep(desc: reset, value: 1);
3443 usleep_range(min: 100, max: 150);
3444 gpiod_set_value_cansleep(desc: reset, value: 0);
3445 usleep_range(min: 1000, max: 1200);
3446
3447 return 0;
3448}
3449
3450int spi_nor_scan(struct spi_nor *nor, const char *name,
3451 const struct spi_nor_hwcaps *hwcaps)
3452{
3453 const struct flash_info *info;
3454 struct device *dev = nor->dev;
3455 struct mtd_info *mtd = &nor->mtd;
3456 int ret;
3457 int i;
3458
3459 ret = spi_nor_check(nor);
3460 if (ret)
3461 return ret;
3462
3463 /* Reset SPI protocol for all commands. */
3464 nor->reg_proto = SNOR_PROTO_1_1_1;
3465 nor->read_proto = SNOR_PROTO_1_1_1;
3466 nor->write_proto = SNOR_PROTO_1_1_1;
3467
3468 /*
3469 * We need the bounce buffer early to read/write registers when going
3470 * through the spi-mem layer (buffers have to be DMA-able).
3471 * For spi-mem drivers, we'll reallocate a new buffer if
3472 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
3473 * shouldn't happen before long since NOR pages are usually less
3474 * than 1KB) after spi_nor_scan() returns.
3475 */
3476 nor->bouncebuf_size = PAGE_SIZE;
3477 nor->bouncebuf = devm_kmalloc(dev, size: nor->bouncebuf_size,
3478 GFP_KERNEL);
3479 if (!nor->bouncebuf)
3480 return -ENOMEM;
3481
3482 ret = spi_nor_hw_reset(nor);
3483 if (ret)
3484 return ret;
3485
3486 info = spi_nor_get_flash_info(nor, name);
3487 if (IS_ERR(ptr: info))
3488 return PTR_ERR(ptr: info);
3489
3490 nor->info = info;
3491
3492 mutex_init(&nor->lock);
3493
3494 /* Init flash parameters based on flash_info struct and SFDP */
3495 ret = spi_nor_init_params(nor);
3496 if (ret)
3497 return ret;
3498
3499 if (spi_nor_use_parallel_locking(nor))
3500 init_waitqueue_head(&nor->rww.wait);
3501
3502 /*
3503 * Configure the SPI memory:
3504 * - select op codes for (Fast) Read, Page Program and Sector Erase.
3505 * - set the number of dummy cycles (mode cycles + wait states).
3506 * - set the SPI protocols for register and memory accesses.
3507 * - set the number of address bytes.
3508 */
3509 ret = spi_nor_setup(nor, hwcaps);
3510 if (ret)
3511 return ret;
3512
3513 /* Send all the required SPI flash commands to initialize device */
3514 ret = spi_nor_init(nor);
3515 if (ret)
3516 return ret;
3517
3518 /* No mtd_info fields should be used up to this point. */
3519 spi_nor_set_mtd_info(nor);
3520
3521 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
3522 (long long)mtd->size >> 10);
3523
3524 dev_dbg(dev,
3525 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
3526 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
3527 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
3528 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
3529
3530 if (mtd->numeraseregions)
3531 for (i = 0; i < mtd->numeraseregions; i++)
3532 dev_dbg(dev,
3533 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
3534 ".erasesize = 0x%.8x (%uKiB), "
3535 ".numblocks = %d }\n",
3536 i, (long long)mtd->eraseregions[i].offset,
3537 mtd->eraseregions[i].erasesize,
3538 mtd->eraseregions[i].erasesize / 1024,
3539 mtd->eraseregions[i].numblocks);
3540 return 0;
3541}
3542EXPORT_SYMBOL_GPL(spi_nor_scan);
3543
3544static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3545{
3546 struct spi_mem_dirmap_info info = {
3547 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
3548 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3549 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
3550 SPI_MEM_OP_DATA_IN(0, NULL, 0)),
3551 .offset = 0,
3552 .length = nor->params->size,
3553 };
3554 struct spi_mem_op *op = &info.op_tmpl;
3555
3556 spi_nor_spimem_setup_op(nor, op, proto: nor->read_proto);
3557
3558 /* convert the dummy cycles to the number of bytes */
3559 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3560 if (spi_nor_protocol_is_dtr(proto: nor->read_proto))
3561 op->dummy.nbytes *= 2;
3562
3563 /*
3564 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3565 * of data bytes is non-zero, the data buswidth won't be set here. So,
3566 * do it explicitly.
3567 */
3568 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto: nor->read_proto);
3569
3570 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(dev: nor->dev, mem: nor->spimem,
3571 info: &info);
3572 return PTR_ERR_OR_ZERO(ptr: nor->dirmap.rdesc);
3573}
3574
3575static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3576{
3577 struct spi_mem_dirmap_info info = {
3578 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
3579 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3580 SPI_MEM_OP_NO_DUMMY,
3581 SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
3582 .offset = 0,
3583 .length = nor->params->size,
3584 };
3585 struct spi_mem_op *op = &info.op_tmpl;
3586
3587 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3588 op->addr.nbytes = 0;
3589
3590 spi_nor_spimem_setup_op(nor, op, proto: nor->write_proto);
3591
3592 /*
3593 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3594 * of data bytes is non-zero, the data buswidth won't be set here. So,
3595 * do it explicitly.
3596 */
3597 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto: nor->write_proto);
3598
3599 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(dev: nor->dev, mem: nor->spimem,
3600 info: &info);
3601 return PTR_ERR_OR_ZERO(ptr: nor->dirmap.wdesc);
3602}
3603
3604static int spi_nor_probe(struct spi_mem *spimem)
3605{
3606 struct spi_device *spi = spimem->spi;
3607 struct flash_platform_data *data = dev_get_platdata(dev: &spi->dev);
3608 struct spi_nor *nor;
3609 /*
3610 * Enable all caps by default. The core will mask them after
3611 * checking what's really supported using spi_mem_supports_op().
3612 */
3613 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3614 char *flash_name;
3615 int ret;
3616
3617 nor = devm_kzalloc(dev: &spi->dev, size: sizeof(*nor), GFP_KERNEL);
3618 if (!nor)
3619 return -ENOMEM;
3620
3621 nor->spimem = spimem;
3622 nor->dev = &spi->dev;
3623 spi_nor_set_flash_node(nor, np: spi->dev.of_node);
3624
3625 spi_mem_set_drvdata(mem: spimem, data: nor);
3626
3627 if (data && data->name)
3628 nor->mtd.name = data->name;
3629
3630 if (!nor->mtd.name)
3631 nor->mtd.name = spi_mem_get_name(mem: spimem);
3632
3633 /*
3634 * For some (historical?) reason many platforms provide two different
3635 * names in flash_platform_data: "name" and "type". Quite often name is
3636 * set to "m25p80" and then "type" provides a real chip name.
3637 * If that's the case, respect "type" and ignore a "name".
3638 */
3639 if (data && data->type)
3640 flash_name = data->type;
3641 else if (!strcmp(spi->modalias, "spi-nor"))
3642 flash_name = NULL; /* auto-detect */
3643 else
3644 flash_name = spi->modalias;
3645
3646 ret = spi_nor_scan(nor, flash_name, &hwcaps);
3647 if (ret)
3648 return ret;
3649
3650 spi_nor_debugfs_register(nor);
3651
3652 /*
3653 * None of the existing parts have > 512B pages, but let's play safe
3654 * and add this logic so that if anyone ever adds support for such
3655 * a NOR we don't end up with buffer overflows.
3656 */
3657 if (nor->params->page_size > PAGE_SIZE) {
3658 nor->bouncebuf_size = nor->params->page_size;
3659 devm_kfree(dev: nor->dev, p: nor->bouncebuf);
3660 nor->bouncebuf = devm_kmalloc(dev: nor->dev,
3661 size: nor->bouncebuf_size,
3662 GFP_KERNEL);
3663 if (!nor->bouncebuf)
3664 return -ENOMEM;
3665 }
3666
3667 ret = spi_nor_create_read_dirmap(nor);
3668 if (ret)
3669 return ret;
3670
3671 ret = spi_nor_create_write_dirmap(nor);
3672 if (ret)
3673 return ret;
3674
3675 return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3676 data ? data->nr_parts : 0);
3677}
3678
3679static int spi_nor_remove(struct spi_mem *spimem)
3680{
3681 struct spi_nor *nor = spi_mem_get_drvdata(mem: spimem);
3682
3683 spi_nor_restore(nor);
3684
3685 /* Clean up MTD stuff. */
3686 return mtd_device_unregister(master: &nor->mtd);
3687}
3688
/*
 * spi-mem shutdown handler: restore the flash defaults so the next boot
 * starts from a known state.
 */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}
3695
3696/*
3697 * Do NOT add to this array without reading the following:
3698 *
3699 * Historically, many flash devices are bound to this driver by their name. But
3700 * since most of these flash are compatible to some extent, and their
3701 * differences can often be differentiated by the JEDEC read-ID command, we
3702 * encourage new users to add support to the spi-nor library, and simply bind
3703 * against a generic string here (e.g., "jedec,spi-nor").
3704 *
3705 * Many flash names are kept here in this list to keep them available
3706 * as module aliases for existing platforms.
3707 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3752
/*
 * Device-tree match table. Per-flash compatibles are deliberately not listed
 * here; new flashes should use the generic "jedec,spi-nor" compatible.
 */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3762
3763/*
3764 * REVISIT: many of these chips have deep power-down modes, which
3765 * should clearly be entered on suspend() to minimize power use.
3766 * And also when they're otherwise idle...
3767 */
3768static struct spi_mem_driver spi_nor_driver = {
3769 .spidrv = {
3770 .driver = {
3771 .name = "spi-nor",
3772 .of_match_table = spi_nor_of_table,
3773 .dev_groups = spi_nor_sysfs_groups,
3774 },
3775 .id_table = spi_nor_dev_ids,
3776 },
3777 .probe = spi_nor_probe,
3778 .remove = spi_nor_remove,
3779 .shutdown = spi_nor_shutdown,
3780};
3781
/* Register the spi-mem driver at module load. */
static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);
3787
3788static void __exit spi_nor_module_exit(void)
3789{
3790 spi_mem_driver_unregister(drv: &spi_nor_driver);
3791 spi_nor_debugfs_shutdown();
3792}
3793module_exit(spi_nor_module_exit);
3794
3795MODULE_LICENSE("GPL v2");
3796MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3797MODULE_AUTHOR("Mike Lavender");
3798MODULE_DESCRIPTION("framework for SPI NOR");
3799

source code of linux/drivers/mtd/spi-nor/core.c