// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

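/* Number of times an erase/program operation is retried before giving up */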
#define MAX_RETRIES 3

#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
#define S29GL064N_MN12 0x0c01

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
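/*
 * DRB = Device Ready, ESB = Erase Status, PSB = Program Status,
 * WBASB = Write Buffer Abort Status, SLSB = Sector Lock Status
 * (expansions inferred from the error reporting in cfi_check_err_status()).
 */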
#define CFI_SR_DRB	BIT(7)
#define CFI_SR_ESB	BIT(5)
#define CFI_SR_PSB	BIT(4)
#define CFI_SR_WBASB	BIT(3)
#define CFI_SR_SLSB	BIT(1)

enum cfi_quirks {
	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
};

static int cfi_amdstd_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync(struct mtd_info *);
static int cfi_amdstd_suspend(struct mtd_info *);
static void cfi_amdstd_resume(struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

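	/*
	 * Both capability bits are checked: the status register must be
	 * advertised and DQ polling must not be; otherwise we stay with
	 * the classic DQ toggle polling.
	 */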
	return extp && extp->MinorVersion >= '5' &&
	       (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);


	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
221 | |
222 | #ifdef AMD_BOOTLOC_BUG |
223 | /* Wheee. Bring me the head of someone at AMD. */ |
224 | static void fixup_amd_bootblock(struct mtd_info *mtd) |
225 | { |
226 | struct map_info *map = mtd->priv; |
227 | struct cfi_private *cfi = map->fldrv_priv; |
228 | struct cfi_pri_amdstd *extp = cfi->cmdset_priv; |
229 | __u8 major = extp->MajorVersion; |
230 | __u8 minor = extp->MinorVersion; |
231 | |
232 | if (((major << 8) | minor) < 0x3131) { |
233 | /* CFI version 1.0 => don't trust bootloc */ |
234 | |
235 | pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n" , |
236 | map->name, cfi->mfr, cfi->id); |
237 | |
238 | /* AFAICS all 29LV400 with a bottom boot block have a device ID |
239 | * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. |
240 | * These were badly detected as they have the 0x80 bit set |
241 | * so treat them as a special case. |
242 | */ |
243 | if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && |
244 | |
245 | /* Macronix added CFI to their 2nd generation |
246 | * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, |
247 | * Fujitsu, Spansion, EON, ESI and older Macronix) |
248 | * has CFI. |
249 | * |
250 | * Therefore also check the manufacturer. |
251 | * This reduces the risk of false detection due to |
252 | * the 8-bit device ID. |
253 | */ |
254 | (cfi->mfr == CFI_MFR_MACRONIX)) { |
255 | pr_debug("%s: Macronix MX29LV400C with bottom boot block" |
256 | " detected\n" , map->name); |
257 | extp->TopBottom = 2; /* bottom boot */ |
258 | } else |
259 | if (cfi->id & 0x80) { |
260 | printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n" , map->name, cfi->id); |
261 | extp->TopBottom = 3; /* top boot */ |
262 | } else { |
263 | extp->TopBottom = 2; /* bottom boot */ |
264 | } |
265 | |
266 | pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" |
267 | " deduced %s from Device ID\n" , map->name, major, minor, |
268 | extp->TopBottom == 2 ? "bottom" : "top" ); |
269 | } |
270 | } |
271 | #endif |
272 | |
273 | #if !FORCE_WORD_WRITE |
274 | static void fixup_use_write_buffers(struct mtd_info *mtd) |
275 | { |
276 | struct map_info *map = mtd->priv; |
277 | struct cfi_private *cfi = map->fldrv_priv; |
278 | |
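	/*
	 * ID 0x2201 is reportedly a S29GL064N variant whose buffered writes
	 * misbehave, so keep the slower word-write path for it (assumption
	 * based on this explicit opt-out; the original carries no comment).
	 */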
	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
		return;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

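	/*
	 * Keep the first 5 bytes (the "PRI" signature plus major/minor
	 * version) and clear the rest before re-deriving the AMD-style
	 * fields from the Atmel layout.
	 */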
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

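	/*
	 * The low 16 bits of EraseRegionInfo encode (number of sectors - 1):
	 * 0x003f advertises 64 sectors; OR-ing in 0x0040 gives 0x007f,
	 * i.e. the 128 sectors the chip actually has.
	 */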
	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

static void fixup_quirks(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. The same seems to be true of the
	 * device IDs. This table picks all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
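	/*
	 * e.g. a single chip advertising MaxBufWriteSize == 5 yields a
	 * 32-byte write buffer; interleaved chips scale this accordingly.
	 */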

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions - 1) - i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to the timeout
		 * field of struct cfi_ident that was read from the chip's
		 * CFI area, if available. Specify a minimum of 2000us, in
		 * case the CFI data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
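
		/*
		 * Each EraseRegionInfo word encodes the erase-block size
		 * divided by 256 in its upper 16 bits and (number of
		 * blocks - 1) in its lower 16 bits.
		 */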
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;
	int ret;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	ret = map_word_equal(map, oldd, curd);

	if (!ret || !expected)
		return ret;

	return map_word_equal(map, curd, *expected);
}

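/*
 * chip_good() is chip_ready() plus an expected-data check. With the
 * CFI_QUIRK_DQ_TRUE_DATA quirk the separate comparison is skipped, since
 * DQ polling on those parts already returns true array data.
 */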
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word *datum = expected;

	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
		datum = NULL;

	return chip_ready(map, chip, addr, datum);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		return 0;

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1 | 0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend if the read/write targets the erase-block address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
 sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
					      chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map) - 1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

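	/*
	 * Standard two-cycle unlock (0xAA @ unlock1, 0x55 @ unlock2)
	 * followed by 0x88 enters the SecSi (OTP) sector overlay;
	 * otp_exit() below leaves it with the 0x90/0x00 reset sequence.
	 */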
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;

	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map) - 1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect the extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);
1454 | |
1455 | /* wait for chip to become ready */ |
1456 | timeo = jiffies + msecs_to_jiffies(m: 2); |
1457 | for (;;) { |
1458 | if (chip_ready(map, chip, addr: adr, NULL)) |
1459 | break; |
1460 | |
1461 | if (time_after(jiffies, timeo)) { |
1462 | pr_err("Waiting for chip to be ready timed out.\n" ); |
1463 | ret = -EIO; |
1464 | break; |
1465 | } |
1466 | UDELAY(map, chip, 0, 1); |
1467 | } |
1468 | |
1469 | /* exit protection commands */ |
1470 | map_write(map, CMD(0x90), chip->start); |
1471 | map_write(map, CMD(0x00), chip->start); |
1472 | |
1473 | chip->state = FL_READY; |
1474 | put_chip(map, chip, adr: chip->start); |
1475 | mutex_unlock(lock: &chip->mutex); |
1476 | |
1477 | return ret; |
1478 | } |
1479 | |
1480 | static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, |
1481 | size_t *retlen, u_char *buf, |
1482 | otp_op_t action, int user_regs) |
1483 | { |
1484 | struct map_info *map = mtd->priv; |
1485 | struct cfi_private *cfi = map->fldrv_priv; |
1486 | int ofs_factor = cfi->interleave * cfi->device_type; |
1487 | unsigned long base; |
1488 | int chipnum; |
1489 | struct flchip *chip; |
1490 | uint8_t otp, lockreg; |
1491 | int ret; |
1492 | |
1493 | size_t user_size, factory_size, otpsize; |
1494 | loff_t user_offset, factory_offset, otpoffset; |
1495 | int user_locked = 0, otplocked; |
1496 | |
1497 | *retlen = 0; |
1498 | |
1499 | for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { |
1500 | chip = &cfi->chips[chipnum]; |
1501 | factory_size = 0; |
1502 | user_size = 0; |
1503 | |
1504 | /* Micron M29EW family */ |
1505 | if (is_m29ew(cfi)) { |
1506 | base = chip->start; |
1507 | |
1508 | /* check whether secsi area is factory locked |
1509 | or user lockable */ |
1510 | mutex_lock(&chip->mutex); |
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);
1521 | |
1522 | if (otp & 0x80) { |
1523 | /* factory locked */ |
1524 | factory_offset = 0; |
1525 | factory_size = 0x100; |
1526 | } else { |
1527 | /* customer lockable */ |
1528 | user_offset = 0; |
1529 | user_size = 0x100; |
1530 | |
1531 | mutex_lock(&chip->mutex); |
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);
1555 | |
1556 | user_locked = ((lockreg & 0x01) == 0x00); |
1557 | } |
1558 | } |
1559 | |
1560 | otpsize = user_regs ? user_size : factory_size; |
1561 | if (!otpsize) |
1562 | continue; |
1563 | otpoffset = user_regs ? user_offset : factory_offset; |
1564 | otplocked = user_regs ? user_locked : 1; |
1565 | |
1566 | if (!action) { |
1567 | /* return otpinfo */ |
1568 | struct otp_info *otpinfo; |
			if (len <= sizeof(*otpinfo))
				return -ENOSPC;
			len -= sizeof(*otpinfo);
1572 | otpinfo = (struct otp_info *)buf; |
1573 | otpinfo->start = from; |
1574 | otpinfo->length = otpsize; |
1575 | otpinfo->locked = otplocked; |
1576 | buf += sizeof(*otpinfo); |
1577 | *retlen += sizeof(*otpinfo); |
1578 | from += otpsize; |
1579 | } else if ((from < otpsize) && (len > 0)) { |
1580 | size_t size; |
1581 | size = (len < otpsize - from) ? len : otpsize - from; |
1582 | ret = action(map, chip, otpoffset + from, size, buf, |
1583 | otpsize); |
1584 | if (ret < 0) |
1585 | return ret; |
1586 | |
1587 | buf += size; |
1588 | len -= size; |
1589 | *retlen += size; |
1590 | from = 0; |
1591 | } else { |
1592 | from -= otpsize; |
1593 | } |
1594 | } |
1595 | return 0; |
1596 | } |
1597 | |
1598 | static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, |
1599 | size_t *retlen, struct otp_info *buf) |
1600 | { |
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
1603 | } |
1604 | |
1605 | static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, |
1606 | size_t *retlen, struct otp_info *buf) |
1607 | { |
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
1610 | } |
1611 | |
1612 | static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, |
1613 | size_t len, size_t *retlen, |
1614 | u_char *buf) |
1615 | { |
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
1618 | } |
1619 | |
1620 | static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, |
1621 | size_t len, size_t *retlen, |
1622 | u_char *buf) |
1623 | { |
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
1626 | } |
1627 | |
1628 | static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, |
1629 | size_t len, size_t *retlen, |
1630 | const u_char *buf) |
1631 | { |
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
				   do_otp_write, 1);
1634 | } |
1635 | |
1636 | static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, |
1637 | size_t len) |
1638 | { |
1639 | size_t retlen; |
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
1642 | } |
1643 | |
1644 | static int __xipram do_write_oneword_once(struct map_info *map, |
1645 | struct flchip *chip, |
1646 | unsigned long adr, map_word datum, |
1647 | int mode, struct cfi_private *cfi) |
1648 | { |
1649 | unsigned long timeo; |
1650 | /* |
1651 | * We use a 1ms + 1 jiffies generic timeout for writes (most devices |
1652 | * have a max write time of a few hundreds usec). However, we should |
1653 | * use the maximum timeout value given by the chip at probe time |
1654 | * instead. Unfortunately, struct flchip does have a field for |
1655 | * maximum timeout, only for typical which can be far too short |
1656 | * depending of the conditions. The ' + 1' is to avoid having a |
1657 | * timeout of 0 jiffies if HZ is smaller than 1000. |
1658 | */ |
1659 | unsigned long uWriteTimeout = (HZ / 1000) + 1; |
1660 | int ret = 0; |
1661 | |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1665 | map_write(map, datum, adr); |
1666 | chip->state = mode; |
1667 | |
1668 | INVALIDATE_CACHE_UDELAY(map, chip, |
1669 | adr, map_bankwidth(map), |
1670 | chip->word_write_time); |
1671 | |
1672 | /* See comment above for timeout value. */ |
1673 | timeo = jiffies + uWriteTimeout; |
1674 | for (;;) { |
1675 | if (chip->state != mode) { |
1676 | /* Someone's suspended the write. Sleep */ |
1677 | DECLARE_WAITQUEUE(wait, current); |
1678 | |
1679 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
1684 | timeo = jiffies + (HZ / 2); /* FIXME */ |
1685 | mutex_lock(&chip->mutex); |
1686 | continue; |
1687 | } |
1688 | |
1689 | /* |
1690 | * We check "time_after" and "!chip_good" before checking |
1691 | * "chip_good" to avoid the failure due to scheduling. |
1692 | */ |
1693 | if (time_after(jiffies, timeo) && |
1694 | !chip_good(map, chip, addr: adr, expected: &datum)) { |
1695 | xip_enable(map, chip, adr); |
1696 | printk(KERN_WARNING "MTD %s(): software timeout\n" , __func__); |
1697 | xip_disable(map, chip, adr); |
1698 | ret = -EIO; |
1699 | break; |
1700 | } |
1701 | |
		if (chip_good(map, chip, adr, &datum)) {
1703 | if (cfi_check_err_status(map, chip, adr)) |
1704 | ret = -EIO; |
1705 | break; |
1706 | } |
1707 | |
1708 | /* Latency issues. Drop the lock, wait a while and retry */ |
1709 | UDELAY(map, chip, adr, 1); |
1710 | } |
1711 | |
1712 | return ret; |
1713 | } |
1714 | |
1715 | static int __xipram do_write_oneword_start(struct map_info *map, |
1716 | struct flchip *chip, |
1717 | unsigned long adr, int mode) |
1718 | { |
1719 | int ret; |
1720 | |
1721 | mutex_lock(&chip->mutex); |
1722 | |
1723 | ret = get_chip(map, chip, adr, mode); |
1724 | if (ret) { |
		mutex_unlock(&chip->mutex);
1726 | return ret; |
1727 | } |
1728 | |
1729 | if (mode == FL_OTP_WRITE) |
1730 | otp_enter(map, chip, adr, map_bankwidth(map)); |
1731 | |
1732 | return ret; |
1733 | } |
1734 | |
1735 | static void __xipram do_write_oneword_done(struct map_info *map, |
1736 | struct flchip *chip, |
1737 | unsigned long adr, int mode) |
1738 | { |
1739 | if (mode == FL_OTP_WRITE) |
1740 | otp_exit(map, chip, adr, map_bankwidth(map)); |
1741 | |
1742 | chip->state = FL_READY; |
1743 | DISABLE_VPP(map); |
1744 | put_chip(map, chip, adr); |
1745 | |
	mutex_unlock(&chip->mutex);
1747 | } |
1748 | |
1749 | static int __xipram do_write_oneword_retry(struct map_info *map, |
1750 | struct flchip *chip, |
1751 | unsigned long adr, map_word datum, |
1752 | int mode) |
1753 | { |
1754 | struct cfi_private *cfi = map->fldrv_priv; |
1755 | int ret = 0; |
1756 | map_word oldd; |
1757 | int retry_cnt = 0; |
1758 | |
1759 | /* |
1760 | * Check for a NOP for the case when the datum to write is already |
1761 | * present - it saves time and works around buggy chips that corrupt |
1762 | * data at other locations when 0xff is written to a location that |
1763 | * already contains 0xff. |
1764 | */ |
1765 | oldd = map_read(map, adr); |
1766 | if (map_word_equal(map, oldd, datum)) { |
1767 | pr_debug("MTD %s(): NOP\n" , __func__); |
1768 | return ret; |
1769 | } |
1770 | |
1771 | XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); |
1772 | ENABLE_VPP(map); |
1773 | xip_disable(map, chip, adr); |
1774 | |
1775 | retry: |
1776 | ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); |
1777 | if (ret) { |
1778 | /* reset on all failures. */ |
1779 | map_write(map, CMD(0xF0), chip->start); |
1780 | /* FIXME - should have reset delay before continuing */ |
1781 | |
1782 | if (++retry_cnt <= MAX_RETRIES) { |
1783 | ret = 0; |
1784 | goto retry; |
1785 | } |
1786 | } |
1787 | xip_enable(map, chip, adr); |
1788 | |
1789 | return ret; |
1790 | } |
1791 | |
1792 | static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, |
1793 | unsigned long adr, map_word datum, |
1794 | int mode) |
1795 | { |
1796 | int ret; |
1797 | |
1798 | adr += chip->start; |
1799 | |
1800 | pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n" , __func__, adr, |
1801 | datum.x[0]); |
1802 | |
1803 | ret = do_write_oneword_start(map, chip, adr, mode); |
1804 | if (ret) |
1805 | return ret; |
1806 | |
1807 | ret = do_write_oneword_retry(map, chip, adr, datum, mode); |
1808 | |
1809 | do_write_oneword_done(map, chip, adr, mode); |
1810 | |
1811 | return ret; |
1812 | } |
1813 | |
1814 | |
1815 | static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, |
1816 | size_t *retlen, const u_char *buf) |
1817 | { |
1818 | struct map_info *map = mtd->priv; |
1819 | struct cfi_private *cfi = map->fldrv_priv; |
1820 | int ret; |
1821 | int chipnum; |
1822 | unsigned long ofs, chipstart; |
1823 | DECLARE_WAITQUEUE(wait, current); |
1824 | |
1825 | chipnum = to >> cfi->chipshift; |
1826 | ofs = to - (chipnum << cfi->chipshift); |
1827 | chipstart = cfi->chips[chipnum].start; |
1828 | |
1829 | /* If it's not bus-aligned, do the first byte write */ |
1830 | if (ofs & (map_bankwidth(map)-1)) { |
1831 | unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); |
1832 | int i = ofs - bus_ofs; |
1833 | int n = 0; |
1834 | map_word tmp_buf; |
1835 | |
1836 | retry: |
1837 | mutex_lock(&cfi->chips[chipnum].mutex); |
1838 | |
1839 | if (cfi->chips[chipnum].state != FL_READY) { |
1840 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1847 | goto retry; |
1848 | } |
1849 | |
1850 | /* Load 'tmp_buf' with old contents of flash */ |
1851 | tmp_buf = map_read(map, bus_ofs+chipstart); |
1852 | |
		mutex_unlock(&cfi->chips[chipnum].mutex);
1854 | |
1855 | /* Number of bytes to copy from buffer */ |
1856 | n = min_t(int, len, map_bankwidth(map)-i); |
1857 | |
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
1862 | if (ret) |
1863 | return ret; |
1864 | |
1865 | ofs += n; |
1866 | buf += n; |
1867 | (*retlen) += n; |
1868 | len -= n; |
1869 | |
1870 | if (ofs >> cfi->chipshift) { |
			chipnum++;
1872 | ofs = 0; |
1873 | if (chipnum == cfi->numchips) |
1874 | return 0; |
1875 | } |
1876 | } |
1877 | |
1878 | /* We are now aligned, write as much as possible */ |
	while (len >= map_bankwidth(map)) {
1880 | map_word datum; |
1881 | |
		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
1886 | if (ret) |
1887 | return ret; |
1888 | |
1889 | ofs += map_bankwidth(map); |
1890 | buf += map_bankwidth(map); |
1891 | (*retlen) += map_bankwidth(map); |
1892 | len -= map_bankwidth(map); |
1893 | |
1894 | if (ofs >> cfi->chipshift) { |
			chipnum++;
1896 | ofs = 0; |
1897 | if (chipnum == cfi->numchips) |
1898 | return 0; |
1899 | chipstart = cfi->chips[chipnum].start; |
1900 | } |
1901 | } |
1902 | |
1903 | /* Write the trailing bytes if any */ |
1904 | if (len & (map_bankwidth(map)-1)) { |
1905 | map_word tmp_buf; |
1906 | |
1907 | retry1: |
1908 | mutex_lock(&cfi->chips[chipnum].mutex); |
1909 | |
1910 | if (cfi->chips[chipnum].state != FL_READY) { |
1911 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1918 | goto retry1; |
1919 | } |
1920 | |
1921 | tmp_buf = map_read(map, ofs + chipstart); |
1922 | |
		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
1929 | if (ret) |
1930 | return ret; |
1931 | |
1932 | (*retlen) += len; |
1933 | } |
1934 | |
1935 | return 0; |
1936 | } |
1937 | |
1938 | #if !FORCE_WORD_WRITE |
1939 | static int __xipram do_write_buffer_wait(struct map_info *map, |
1940 | struct flchip *chip, unsigned long adr, |
1941 | map_word datum) |
1942 | { |
1943 | unsigned long timeo; |
1944 | unsigned long u_write_timeout; |
1945 | int ret = 0; |
1946 | |
1947 | /* |
1948 | * Timeout is calculated according to CFI data, if available. |
1949 | * See more comments in cfi_cmdset_0002(). |
1950 | */ |
	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1952 | timeo = jiffies + u_write_timeout; |
1953 | |
1954 | for (;;) { |
1955 | if (chip->state != FL_WRITING) { |
1956 | /* Someone's suspended the write. Sleep */ |
1957 | DECLARE_WAITQUEUE(wait, current); |
1958 | |
1959 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
1964 | timeo = jiffies + (HZ / 2); /* FIXME */ |
1965 | mutex_lock(&chip->mutex); |
1966 | continue; |
1967 | } |
1968 | |
1969 | /* |
1970 | * We check "time_after" and "!chip_good" before checking |
1971 | * "chip_good" to avoid the failure due to scheduling. |
1972 | */ |
1973 | if (time_after(jiffies, timeo) && |
1974 | !chip_good(map, chip, addr: adr, expected: &datum)) { |
1975 | pr_err("MTD %s(): software timeout, address:0x%.8lx.\n" , |
1976 | __func__, adr); |
1977 | ret = -EIO; |
1978 | break; |
1979 | } |
1980 | |
		if (chip_good(map, chip, adr, &datum)) {
1982 | if (cfi_check_err_status(map, chip, adr)) |
1983 | ret = -EIO; |
1984 | break; |
1985 | } |
1986 | |
1987 | /* Latency issues. Drop the lock, wait a while and retry */ |
1988 | UDELAY(map, chip, adr, 1); |
1989 | } |
1990 | |
1991 | return ret; |
1992 | } |
1993 | |
1994 | static void __xipram do_write_buffer_reset(struct map_info *map, |
1995 | struct flchip *chip, |
1996 | struct cfi_private *cfi) |
1997 | { |
1998 | /* |
1999 | * Recovery from write-buffer programming failures requires |
2000 | * the write-to-buffer-reset sequence. Since the last part |
2001 | * of the sequence also works as a normal reset, we can run |
2002 | * the same commands regardless of why we are here. |
2003 | * See e.g. |
2004 | * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf |
2005 | */ |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
2012 | |
2013 | /* FIXME - should have reset delay before continuing */ |
2014 | } |
2015 | |
2016 | /* |
2017 | * FIXME: interleaved mode not tested, and probably not supported! |
2018 | */ |
2019 | static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, |
2020 | unsigned long adr, const u_char *buf, |
2021 | int len) |
2022 | { |
2023 | struct cfi_private *cfi = map->fldrv_priv; |
2024 | int ret; |
2025 | unsigned long cmd_adr; |
2026 | int z, words; |
2027 | map_word datum; |
2028 | |
2029 | adr += chip->start; |
2030 | cmd_adr = adr; |
2031 | |
2032 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
2036 | return ret; |
2037 | } |
2038 | |
	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2042 | __func__, adr, datum.x[0]); |
2043 | |
2044 | XIP_INVAL_CACHED_RANGE(map, adr, len); |
2045 | ENABLE_VPP(map); |
2046 | xip_disable(map, chip, cmd_adr); |
2047 | |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2050 | |
2051 | /* Write Buffer Load */ |
2052 | map_write(map, CMD(0x25), cmd_adr); |
2053 | |
2054 | chip->state = FL_WRITING_TO_BUFFER; |
2055 | |
2056 | /* Write length of data to come */ |
2057 | words = len / map_bankwidth(map); |
2058 | map_write(map, CMD(words - 1), cmd_adr); |
2059 | /* Write data */ |
2060 | z = 0; |
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
2063 | map_write(map, datum, adr + z); |
2064 | |
2065 | z += map_bankwidth(map); |
2066 | buf += map_bankwidth(map); |
2067 | } |
2068 | z -= map_bankwidth(map); |
2069 | |
2070 | adr += z; |
2071 | |
2072 | /* Write Buffer Program Confirm: GO GO GO */ |
2073 | map_write(map, CMD(0x29), cmd_adr); |
2074 | chip->state = FL_WRITING; |
2075 | |
2076 | INVALIDATE_CACHE_UDELAY(map, chip, |
2077 | adr, map_bankwidth(map), |
2078 | chip->word_write_time); |
2079 | |
2080 | ret = do_write_buffer_wait(map, chip, adr, datum); |
2081 | if (ret) |
2082 | do_write_buffer_reset(map, chip, cfi); |
2083 | |
2084 | xip_enable(map, chip, adr); |
2085 | |
2086 | chip->state = FL_READY; |
2087 | DISABLE_VPP(map); |
2088 | put_chip(map, chip, adr); |
	mutex_unlock(&chip->mutex);
2090 | |
2091 | return ret; |
2092 | } |
2093 | |
2094 | |
2095 | static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, |
2096 | size_t *retlen, const u_char *buf) |
2097 | { |
2098 | struct map_info *map = mtd->priv; |
2099 | struct cfi_private *cfi = map->fldrv_priv; |
2100 | int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
2101 | int ret; |
2102 | int chipnum; |
2103 | unsigned long ofs; |
2104 | |
2105 | chipnum = to >> cfi->chipshift; |
2106 | ofs = to - (chipnum << cfi->chipshift); |
2107 | |
2108 | /* If it's not bus-aligned, do the first word write */ |
2109 | if (ofs & (map_bankwidth(map)-1)) { |
2110 | size_t local_len = (-ofs)&(map_bankwidth(map)-1); |
2111 | if (local_len > len) |
2112 | local_len = len; |
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
2115 | if (ret) |
2116 | return ret; |
2117 | ofs += local_len; |
2118 | buf += local_len; |
2119 | len -= local_len; |
2120 | |
2121 | if (ofs >> cfi->chipshift) { |
			chipnum++;
2123 | ofs = 0; |
2124 | if (chipnum == cfi->numchips) |
2125 | return 0; |
2126 | } |
2127 | } |
2128 | |
2129 | /* Write buffer is worth it only if more than one word to write... */ |
2130 | while (len >= map_bankwidth(map) * 2) { |
2131 | /* We must not cross write block boundaries */ |
2132 | int size = wbufsize - (ofs & (wbufsize-1)); |
2133 | |
2134 | if (size > len) |
2135 | size = len; |
2136 | if (size % map_bankwidth(map)) |
2137 | size -= size % map_bankwidth(map); |
2138 | |
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
2141 | if (ret) |
2142 | return ret; |
2143 | |
2144 | ofs += size; |
2145 | buf += size; |
2146 | (*retlen) += size; |
2147 | len -= size; |
2148 | |
2149 | if (ofs >> cfi->chipshift) { |
			chipnum++;
2151 | ofs = 0; |
2152 | if (chipnum == cfi->numchips) |
2153 | return 0; |
2154 | } |
2155 | } |
2156 | |
2157 | if (len) { |
2158 | size_t retlen_dregs = 0; |
2159 | |
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);
2162 | |
2163 | *retlen += retlen_dregs; |
2164 | return ret; |
2165 | } |
2166 | |
2167 | return 0; |
2168 | } |
2169 | #endif /* !FORCE_WORD_WRITE */ |
2170 | |
2171 | /* |
2172 | * Wait for the flash chip to become ready to write data |
2173 | * |
2174 | * This is only called during the panic_write() path. When panic_write() |
2175 | * is called, the kernel is in the process of a panic, and will soon be |
2176 | * dead. Therefore we don't take any locks, and attempt to get access |
2177 | * to the chip as soon as possible. |
2178 | */ |
2179 | static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, |
2180 | unsigned long adr) |
2181 | { |
2182 | struct cfi_private *cfi = map->fldrv_priv; |
2183 | int retries = 10; |
2184 | int i; |
2185 | |
2186 | /* |
2187 | * If the driver thinks the chip is idle, and no toggle bits |
2188 | * are changing, then the chip is actually idle for sure. |
2189 | */ |
	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2191 | return 0; |
2192 | |
2193 | /* |
2194 | * Try several times to reset the chip and then wait for it |
2195 | * to become idle. The upper limit of a few milliseconds of |
2196 | * delay isn't a big problem: the kernel is dying anyway. It |
2197 | * is more important to save the messages. |
2198 | */ |
2199 | while (retries > 0) { |
2200 | const unsigned long timeo = (HZ / 1000) + 1; |
2201 | |
2202 | /* send the reset command */ |
2203 | map_write(map, CMD(0xF0), chip->start); |
2204 | |
2205 | /* wait for the chip to become ready */ |
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, chip, adr, NULL))
2208 | return 0; |
2209 | |
2210 | udelay(1); |
2211 | } |
2212 | |
2213 | retries--; |
2214 | } |
2215 | |
2216 | /* the chip never became ready */ |
2217 | return -EBUSY; |
2218 | } |
2219 | |
2220 | /* |
2221 | * Write out one word of data to a single flash chip during a kernel panic |
2222 | * |
2223 | * This is only called during the panic_write() path. When panic_write() |
2224 | * is called, the kernel is in the process of a panic, and will soon be |
2225 | * dead. Therefore we don't take any locks, and attempt to get access |
2226 | * to the chip as soon as possible. |
2227 | * |
2228 | * The implementation of this routine is intentionally similar to |
2229 | * do_write_oneword(), in order to ease code maintenance. |
2230 | */ |
2231 | static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, |
2232 | unsigned long adr, map_word datum) |
2233 | { |
2234 | const unsigned long uWriteTimeout = (HZ / 1000) + 1; |
2235 | struct cfi_private *cfi = map->fldrv_priv; |
2236 | int retry_cnt = 0; |
2237 | map_word oldd; |
2238 | int ret; |
2239 | int i; |
2240 | |
2241 | adr += chip->start; |
2242 | |
2243 | ret = cfi_amdstd_panic_wait(map, chip, adr); |
2244 | if (ret) |
2245 | return ret; |
2246 | |
2247 | pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n" , |
2248 | __func__, adr, datum.x[0]); |
2249 | |
2250 | /* |
2251 | * Check for a NOP for the case when the datum to write is already |
2252 | * present - it saves time and works around buggy chips that corrupt |
2253 | * data at other locations when 0xff is written to a location that |
2254 | * already contains 0xff. |
2255 | */ |
2256 | oldd = map_read(map, adr); |
2257 | if (map_word_equal(map, oldd, datum)) { |
2258 | pr_debug("MTD %s(): NOP\n" , __func__); |
2259 | goto op_done; |
2260 | } |
2261 | |
2262 | ENABLE_VPP(map); |
2263 | |
2264 | retry: |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2268 | map_write(map, datum, adr); |
2269 | |
	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, chip, adr, NULL))
2272 | break; |
2273 | |
2274 | udelay(1); |
2275 | } |
2276 | |
	if (!chip_ready(map, chip, adr, &datum) ||
2278 | cfi_check_err_status(map, chip, adr)) { |
2279 | /* reset on all failures. */ |
2280 | map_write(map, CMD(0xF0), chip->start); |
2281 | /* FIXME - should have reset delay before continuing */ |
2282 | |
2283 | if (++retry_cnt <= MAX_RETRIES) |
2284 | goto retry; |
2285 | |
2286 | ret = -EIO; |
2287 | } |
2288 | |
2289 | op_done: |
2290 | DISABLE_VPP(map); |
2291 | return ret; |
2292 | } |
2293 | |
2294 | /* |
2295 | * Write out some data during a kernel panic |
2296 | * |
2297 | * This is used by the mtdoops driver to save the dying messages from a |
2298 | * kernel which has panic'd. |
2299 | * |
2300 | * This routine ignores all of the locking used throughout the rest of the |
2301 | * driver, in order to ensure that the data gets written out no matter what |
2302 | * state this driver (and the flash chip itself) was in when the kernel crashed. |
2303 | * |
2304 | * The implementation of this routine is intentionally similar to |
2305 | * cfi_amdstd_write_words(), in order to ease code maintenance. |
2306 | */ |
2307 | static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, |
2308 | size_t *retlen, const u_char *buf) |
2309 | { |
2310 | struct map_info *map = mtd->priv; |
2311 | struct cfi_private *cfi = map->fldrv_priv; |
2312 | unsigned long ofs, chipstart; |
2313 | int ret; |
2314 | int chipnum; |
2315 | |
2316 | chipnum = to >> cfi->chipshift; |
2317 | ofs = to - (chipnum << cfi->chipshift); |
2318 | chipstart = cfi->chips[chipnum].start; |
2319 | |
2320 | /* If it's not bus aligned, do the first byte write */ |
2321 | if (ofs & (map_bankwidth(map) - 1)) { |
2322 | unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); |
2323 | int i = ofs - bus_ofs; |
2324 | int n = 0; |
2325 | map_word tmp_buf; |
2326 | |
		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2328 | if (ret) |
2329 | return ret; |
2330 | |
2331 | /* Load 'tmp_buf' with old contents of flash */ |
2332 | tmp_buf = map_read(map, bus_ofs + chipstart); |
2333 | |
2334 | /* Number of bytes to copy from buffer */ |
2335 | n = min_t(int, len, map_bankwidth(map) - i); |
2336 | |
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
2341 | if (ret) |
2342 | return ret; |
2343 | |
2344 | ofs += n; |
2345 | buf += n; |
2346 | (*retlen) += n; |
2347 | len -= n; |
2348 | |
2349 | if (ofs >> cfi->chipshift) { |
2350 | chipnum++; |
2351 | ofs = 0; |
2352 | if (chipnum == cfi->numchips) |
2353 | return 0; |
2354 | } |
2355 | } |
2356 | |
2357 | /* We are now aligned, write as much as possible */ |
2358 | while (len >= map_bankwidth(map)) { |
2359 | map_word datum; |
2360 | |
		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
2365 | if (ret) |
2366 | return ret; |
2367 | |
2368 | ofs += map_bankwidth(map); |
2369 | buf += map_bankwidth(map); |
2370 | (*retlen) += map_bankwidth(map); |
2371 | len -= map_bankwidth(map); |
2372 | |
2373 | if (ofs >> cfi->chipshift) { |
2374 | chipnum++; |
2375 | ofs = 0; |
2376 | if (chipnum == cfi->numchips) |
2377 | return 0; |
2378 | |
2379 | chipstart = cfi->chips[chipnum].start; |
2380 | } |
2381 | } |
2382 | |
2383 | /* Write the trailing bytes if any */ |
2384 | if (len & (map_bankwidth(map) - 1)) { |
2385 | map_word tmp_buf; |
2386 | |
		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2388 | if (ret) |
2389 | return ret; |
2390 | |
2391 | tmp_buf = map_read(map, ofs + chipstart); |
2392 | |
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
2397 | if (ret) |
2398 | return ret; |
2399 | |
2400 | (*retlen) += len; |
2401 | } |
2402 | |
2403 | return 0; |
2404 | } |
2405 | |
2406 | |
2407 | /* |
2408 | * Handle devices with one erase region, that only implement |
2409 | * the chip erase command. |
2410 | */ |
2411 | static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) |
2412 | { |
2413 | struct cfi_private *cfi = map->fldrv_priv; |
2414 | unsigned long timeo; |
2415 | unsigned long int adr; |
2416 | DECLARE_WAITQUEUE(wait, current); |
2417 | int ret; |
2418 | int retry_cnt = 0; |
2419 | map_word datum = map_word_ff(map); |
2420 | |
2421 | adr = cfi->addr_unlock1; |
2422 | |
2423 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
2427 | return ret; |
2428 | } |
2429 | |
2430 | pr_debug("MTD %s(): ERASE 0x%.8lx\n" , |
2431 | __func__, chip->start); |
2432 | |
2433 | XIP_INVAL_CACHED_RANGE(map, adr, map->size); |
2434 | ENABLE_VPP(map); |
2435 | xip_disable(map, chip, adr); |
2436 | |
2437 | retry: |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2444 | |
2445 | chip->state = FL_ERASING; |
2446 | chip->erase_suspended = 0; |
2447 | chip->in_progress_block_addr = adr; |
2448 | chip->in_progress_block_mask = ~(map->size - 1); |
2449 | |
2450 | INVALIDATE_CACHE_UDELAY(map, chip, |
2451 | adr, map->size, |
2452 | chip->erase_time*500); |
2453 | |
2454 | timeo = jiffies + (HZ*20); |
2455 | |
2456 | for (;;) { |
2457 | if (chip->state != FL_ERASING) { |
2458 | /* Someone's suspended the erase. Sleep */ |
2459 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
2464 | mutex_lock(&chip->mutex); |
2465 | continue; |
2466 | } |
2467 | if (chip->erase_suspended) { |
2468 | /* This erase was suspended and resumed. |
2469 | Adjust the timeout */ |
2470 | timeo = jiffies + (HZ*20); /* FIXME */ |
2471 | chip->erase_suspended = 0; |
2472 | } |
2473 | |
		if (chip_ready(map, chip, adr, &datum)) {
2475 | if (cfi_check_err_status(map, chip, adr)) |
2476 | ret = -EIO; |
2477 | break; |
2478 | } |
2479 | |
2480 | if (time_after(jiffies, timeo)) { |
2481 | printk(KERN_WARNING "MTD %s(): software timeout\n" , |
2482 | __func__); |
2483 | ret = -EIO; |
2484 | break; |
2485 | } |
2486 | |
2487 | /* Latency issues. Drop the lock, wait a while and retry */ |
2488 | UDELAY(map, chip, adr, 1000000/HZ); |
2489 | } |
2490 | /* Did we succeed? */ |
2491 | if (ret) { |
2492 | /* reset on all failures. */ |
2493 | map_write(map, CMD(0xF0), chip->start); |
2494 | /* FIXME - should have reset delay before continuing */ |
2495 | |
2496 | if (++retry_cnt <= MAX_RETRIES) { |
2497 | ret = 0; |
2498 | goto retry; |
2499 | } |
2500 | } |
2501 | |
2502 | chip->state = FL_READY; |
2503 | xip_enable(map, chip, adr); |
2504 | DISABLE_VPP(map); |
2505 | put_chip(map, chip, adr); |
	mutex_unlock(&chip->mutex);
2507 | |
2508 | return ret; |
2509 | } |
2510 | |
2511 | |
2512 | static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) |
2513 | { |
2514 | struct cfi_private *cfi = map->fldrv_priv; |
2515 | unsigned long timeo; |
2516 | DECLARE_WAITQUEUE(wait, current); |
2517 | int ret; |
2518 | int retry_cnt = 0; |
2519 | map_word datum = map_word_ff(map); |
2520 | |
2521 | adr += chip->start; |
2522 | |
2523 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
2527 | return ret; |
2528 | } |
2529 | |
2530 | pr_debug("MTD %s(): ERASE 0x%.8lx\n" , |
2531 | __func__, adr); |
2532 | |
2533 | XIP_INVAL_CACHED_RANGE(map, adr, len); |
2534 | ENABLE_VPP(map); |
2535 | xip_disable(map, chip, adr); |
2536 | |
2537 | retry: |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2543 | map_write(map, cfi->sector_erase_cmd, adr); |
2544 | |
2545 | chip->state = FL_ERASING; |
2546 | chip->erase_suspended = 0; |
2547 | chip->in_progress_block_addr = adr; |
2548 | chip->in_progress_block_mask = ~(len - 1); |
2549 | |
2550 | INVALIDATE_CACHE_UDELAY(map, chip, |
2551 | adr, len, |
2552 | chip->erase_time*500); |
2553 | |
2554 | timeo = jiffies + (HZ*20); |
2555 | |
2556 | for (;;) { |
2557 | if (chip->state != FL_ERASING) { |
2558 | /* Someone's suspended the erase. Sleep */ |
2559 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
2564 | mutex_lock(&chip->mutex); |
2565 | continue; |
2566 | } |
2567 | if (chip->erase_suspended) { |
2568 | /* This erase was suspended and resumed. |
2569 | Adjust the timeout */ |
2570 | timeo = jiffies + (HZ*20); /* FIXME */ |
2571 | chip->erase_suspended = 0; |
2572 | } |
2573 | |
		if (chip_ready(map, chip, adr, &datum)) {
2575 | if (cfi_check_err_status(map, chip, adr)) |
2576 | ret = -EIO; |
2577 | break; |
2578 | } |
2579 | |
2580 | if (time_after(jiffies, timeo)) { |
			printk(KERN_WARNING "MTD %s(): software timeout\n",
2582 | __func__); |
2583 | ret = -EIO; |
2584 | break; |
2585 | } |
2586 | |
2587 | /* Latency issues. Drop the lock, wait a while and retry */ |
2588 | UDELAY(map, chip, adr, 1000000/HZ); |
2589 | } |
2590 | /* Did we succeed? */ |
2591 | if (ret) { |
2592 | /* reset on all failures. */ |
2593 | map_write(map, CMD(0xF0), chip->start); |
2594 | /* FIXME - should have reset delay before continuing */ |
2595 | |
2596 | if (++retry_cnt <= MAX_RETRIES) { |
2597 | ret = 0; |
2598 | goto retry; |
2599 | } |
2600 | } |
2601 | |
2602 | chip->state = FL_READY; |
2603 | xip_enable(map, chip, adr); |
2604 | DISABLE_VPP(map); |
2605 | put_chip(map, chip, adr); |
	mutex_unlock(&chip->mutex);
2607 | return ret; |
2608 | } |
2609 | |
2610 | |
2611 | static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) |
2612 | { |
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
2615 | } |
2616 | |
2617 | |
2618 | static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) |
2619 | { |
2620 | struct map_info *map = mtd->priv; |
2621 | struct cfi_private *cfi = map->fldrv_priv; |
2622 | |
2623 | if (instr->addr != 0) |
2624 | return -EINVAL; |
2625 | |
2626 | if (instr->len != mtd->size) |
2627 | return -EINVAL; |
2628 | |
	return do_erase_chip(map, &cfi->chips[0]);
2630 | } |
2631 | |
2632 | static int do_atmel_lock(struct map_info *map, struct flchip *chip, |
2633 | unsigned long adr, int len, void *thunk) |
2634 | { |
2635 | struct cfi_private *cfi = map->fldrv_priv; |
2636 | int ret; |
2637 | |
2638 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2640 | if (ret) |
2641 | goto out_unlock; |
2642 | chip->state = FL_LOCKING; |
2643 | |
2644 | pr_debug("MTD %s(): LOCK 0x%08lx len %d\n" , __func__, adr, len); |
2645 | |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
2656 | map_write(map, CMD(0x40), chip->start + adr); |
2657 | |
2658 | chip->state = FL_READY; |
	put_chip(map, chip, adr + chip->start);
2660 | ret = 0; |
2661 | |
2662 | out_unlock: |
	mutex_unlock(&chip->mutex);
2664 | return ret; |
2665 | } |
2666 | |
2667 | static int do_atmel_unlock(struct map_info *map, struct flchip *chip, |
2668 | unsigned long adr, int len, void *thunk) |
2669 | { |
2670 | struct cfi_private *cfi = map->fldrv_priv; |
2671 | int ret; |
2672 | |
2673 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2675 | if (ret) |
2676 | goto out_unlock; |
2677 | chip->state = FL_UNLOCKING; |
2678 | |
2679 | pr_debug("MTD %s(): LOCK 0x%08lx len %d\n" , __func__, adr, len); |
2680 | |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
2683 | map_write(map, CMD(0x70), adr); |
2684 | |
2685 | chip->state = FL_READY; |
	put_chip(map, chip, adr + chip->start);
2687 | ret = 0; |
2688 | |
2689 | out_unlock: |
	mutex_unlock(&chip->mutex);
2691 | return ret; |
2692 | } |
2693 | |
2694 | static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2695 | { |
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2697 | } |
2698 | |
2699 | static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2700 | { |
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2702 | } |
2703 | |
2704 | /* |
2705 | * Advanced Sector Protection - PPB (Persistent Protection Bit) locking |
2706 | */ |
2707 | |
2708 | struct ppb_lock { |
2709 | struct flchip *chip; |
2710 | unsigned long adr; |
2711 | int locked; |
2712 | }; |
2713 | |
2714 | #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) |
2715 | #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) |
2716 | #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) |
2717 | |
2718 | static int __maybe_unused do_ppb_xxlock(struct map_info *map, |
2719 | struct flchip *chip, |
2720 | unsigned long adr, int len, void *thunk) |
2721 | { |
2722 | struct cfi_private *cfi = map->fldrv_priv; |
2723 | unsigned long timeo; |
2724 | int ret; |
2725 | |
2726 | adr += chip->start; |
2727 | mutex_lock(&chip->mutex); |
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
2731 | return ret; |
2732 | } |
2733 | |
2734 | pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n" , __func__, adr, len); |
2735 | |
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
2743 | |
2744 | if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { |
2745 | chip->state = FL_LOCKING; |
2746 | map_write(map, CMD(0xA0), adr); |
2747 | map_write(map, CMD(0x00), adr); |
2748 | } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { |
2749 | /* |
2750 | * Unlocking of one specific sector is not supported, so we |
2751 | * have to unlock all sectors of this device instead |
2752 | */ |
2753 | chip->state = FL_UNLOCKING; |
2754 | map_write(map, CMD(0x80), chip->start); |
2755 | map_write(map, CMD(0x30), chip->start); |
2756 | } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { |
2757 | chip->state = FL_JEDEC_QUERY; |
2758 | /* Return locked status: 0->locked, 1->unlocked */ |
		ret = !cfi_read_query(map, adr);
2760 | } else |
2761 | BUG(); |
2762 | |
2763 | /* |
2764 | * Wait for some time as unlocking of all sectors takes quite long |
2765 | */ |
2766 | timeo = jiffies + msecs_to_jiffies(m: 2000); /* 2s max (un)locking */ |
2767 | for (;;) { |
		if (chip_ready(map, chip, adr, NULL))
2769 | break; |
2770 | |
2771 | if (time_after(jiffies, timeo)) { |
2772 | printk(KERN_ERR "Waiting for chip to be ready timed out.\n" ); |
2773 | ret = -EIO; |
2774 | break; |
2775 | } |
2776 | |
2777 | UDELAY(map, chip, adr, 1); |
2778 | } |
2779 | |
2780 | /* Exit BC commands */ |
2781 | map_write(map, CMD(0x90), chip->start); |
2782 | map_write(map, CMD(0x00), chip->start); |
2783 | |
2784 | chip->state = FL_READY; |
2785 | put_chip(map, chip, adr); |
	mutex_unlock(&chip->mutex);
2787 | |
2788 | return ret; |
2789 | } |
2790 | |
2791 | static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, |
2792 | uint64_t len) |
2793 | { |
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_LOCK);
2796 | } |
2797 | |
2798 | static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, |
2799 | uint64_t len) |
2800 | { |
2801 | struct mtd_erase_region_info *regions = mtd->eraseregions; |
2802 | struct map_info *map = mtd->priv; |
2803 | struct cfi_private *cfi = map->fldrv_priv; |
2804 | struct ppb_lock *sect; |
2805 | unsigned long adr; |
2806 | loff_t offset; |
2807 | uint64_t length; |
2808 | int chipnum; |
2809 | int i; |
2810 | int sectors; |
2811 | int ret; |
2812 | int max_sectors; |
2813 | |
2814 | /* |
2815 | * PPB unlocking always unlocks all sectors of the flash chip. |
2816 | * We need to re-lock all previously locked sectors. So lets |
2817 | * first check the locking status of all sectors and save |
2818 | * it for future use. |
2819 | */ |
2820 | max_sectors = 0; |
2821 | for (i = 0; i < mtd->numeraseregions; i++) |
2822 | max_sectors += regions[i].numblocks; |
2823 | |
	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2825 | if (!sect) |
2826 | return -ENOMEM; |
2827 | |
2828 | /* |
2829 | * This code to walk all sectors is a slightly modified version |
2830 | * of the cfi_varsize_frob() code. |
2831 | */ |
2832 | i = 0; |
2833 | chipnum = 0; |
2834 | adr = 0; |
2835 | sectors = 0; |
2836 | offset = 0; |
2837 | length = mtd->size; |
2838 | |
2839 | while (length) { |
2840 | int size = regions[i].erasesize; |
2841 | |
2842 | /* |
2843 | * Only test sectors that shall not be unlocked. The other |
2844 | * sectors shall be unlocked, so lets keep their locking |
2845 | * status at "unlocked" (locked=0) for the final re-locking. |
2846 | */ |
2847 | if ((offset < ofs) || (offset >= (ofs + len))) { |
2848 | sect[sectors].chip = &cfi->chips[chipnum]; |
2849 | sect[sectors].adr = adr; |
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
2853 | } |
2854 | |
2855 | adr += size; |
2856 | offset += size; |
2857 | length -= size; |
2858 | |
2859 | if (offset == regions[i].offset + size * regions[i].numblocks) |
2860 | i++; |
2861 | |
2862 | if (adr >> cfi->chipshift) { |
2863 | if (offset >= (ofs + len)) |
2864 | break; |
2865 | adr = 0; |
2866 | chipnum++; |
2867 | |
2868 | if (chipnum >= cfi->numchips) |
2869 | break; |
2870 | } |
2871 | |
2872 | sectors++; |
2873 | if (sectors >= max_sectors) { |
2874 | printk(KERN_ERR "Only %d sectors for PPB locking supported!\n" , |
2875 | max_sectors); |
2876 | kfree(objp: sect); |
2877 | return -EINVAL; |
2878 | } |
2879 | } |
2880 | |
2881 | /* Now unlock the whole chip */ |
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
2886 | return ret; |
2887 | } |
2888 | |
2889 | /* |
2890 | * PPB unlocking always unlocks all sectors of the flash chip. |
2891 | * We need to re-lock all previously locked sectors. |
2892 | */ |
2893 | for (i = 0; i < sectors; i++) { |
2894 | if (sect[i].locked) |
			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
2897 | } |
2898 | |
	kfree(sect);
2900 | return ret; |
2901 | } |
2902 | |
2903 | static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, |
2904 | uint64_t len) |
2905 | { |
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2908 | } |
2909 | |
2910 | static void cfi_amdstd_sync (struct mtd_info *mtd) |
2911 | { |
2912 | struct map_info *map = mtd->priv; |
2913 | struct cfi_private *cfi = map->fldrv_priv; |
2914 | int i; |
2915 | struct flchip *chip; |
2916 | int ret = 0; |
2917 | DECLARE_WAITQUEUE(wait, current); |
2918 | |
	for (i = 0; !ret && i < cfi->numchips; i++) {
2920 | chip = &cfi->chips[i]; |
2921 | |
2922 | retry: |
2923 | mutex_lock(&chip->mutex); |
2924 | |
		switch (chip->state) {
2926 | case FL_READY: |
2927 | case FL_STATUS: |
2928 | case FL_CFI_QUERY: |
2929 | case FL_JEDEC_QUERY: |
2930 | chip->oldstate = chip->state; |
2931 | chip->state = FL_SYNCING; |
2932 | /* No need to wake_up() on this state change - |
2933 | * as the whole point is that nobody can do anything |
2934 | * with the chip now anyway. |
2935 | */ |
2936 | fallthrough; |
2937 | case FL_SYNCING: |
			mutex_unlock(&chip->mutex);
2939 | break; |
2940 | |
2941 | default: |
2942 | /* Not an idle state */ |
2943 | set_current_state(TASK_UNINTERRUPTIBLE); |
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);
2951 | |
2952 | goto retry; |
2953 | } |
2954 | } |
2955 | |
2956 | /* Unlock the chips again */ |
2957 | |
	for (i--; i >= 0; i--) {
2959 | chip = &cfi->chips[i]; |
2960 | |
2961 | mutex_lock(&chip->mutex); |
2962 | |
2963 | if (chip->state == FL_SYNCING) { |
2964 | chip->state = chip->oldstate; |
2965 | wake_up(&chip->wq); |
2966 | } |
		mutex_unlock(&chip->mutex);
2968 | } |
2969 | } |
2970 | |
2971 | |
2972 | static int cfi_amdstd_suspend(struct mtd_info *mtd) |
2973 | { |
2974 | struct map_info *map = mtd->priv; |
2975 | struct cfi_private *cfi = map->fldrv_priv; |
2976 | int i; |
2977 | struct flchip *chip; |
2978 | int ret = 0; |
2979 | |
	for (i = 0; !ret && i < cfi->numchips; i++) {
2981 | chip = &cfi->chips[i]; |
2982 | |
2983 | mutex_lock(&chip->mutex); |
2984 | |
		switch (chip->state) {
2986 | case FL_READY: |
2987 | case FL_STATUS: |
2988 | case FL_CFI_QUERY: |
2989 | case FL_JEDEC_QUERY: |
2990 | chip->oldstate = chip->state; |
2991 | chip->state = FL_PM_SUSPENDED; |
2992 | /* No need to wake_up() on this state change - |
2993 | * as the whole point is that nobody can do anything |
2994 | * with the chip now anyway. |
2995 | */ |
2996 | break; |
2997 | case FL_PM_SUSPENDED: |
2998 | break; |
2999 | |
3000 | default: |
3001 | ret = -EAGAIN; |
3002 | break; |
3003 | } |
		mutex_unlock(&chip->mutex);
3005 | } |
3006 | |
3007 | /* Unlock the chips again */ |
3008 | |
3009 | if (ret) { |
		for (i--; i >= 0; i--) {
3011 | chip = &cfi->chips[i]; |
3012 | |
3013 | mutex_lock(&chip->mutex); |
3014 | |
3015 | if (chip->state == FL_PM_SUSPENDED) { |
3016 | chip->state = chip->oldstate; |
3017 | wake_up(&chip->wq); |
3018 | } |
			mutex_unlock(&chip->mutex);
3020 | } |
3021 | } |
3022 | |
3023 | return ret; |
3024 | } |
3025 | |
3026 | |
3027 | static void cfi_amdstd_resume(struct mtd_info *mtd) |
3028 | { |
3029 | struct map_info *map = mtd->priv; |
3030 | struct cfi_private *cfi = map->fldrv_priv; |
3031 | int i; |
3032 | struct flchip *chip; |
3033 | |
	for (i = 0; i < cfi->numchips; i++) {
3035 | |
3036 | chip = &cfi->chips[i]; |
3037 | |
3038 | mutex_lock(&chip->mutex); |
3039 | |
3040 | if (chip->state == FL_PM_SUSPENDED) { |
3041 | chip->state = FL_READY; |
3042 | map_write(map, CMD(0xF0), chip->start); |
3043 | wake_up(&chip->wq); |
3044 | } |
3045 | else |
3046 | printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n" ); |
3047 | |
		mutex_unlock(&chip->mutex);
3049 | } |
3050 | } |
3051 | |
3052 | |
3053 | /* |
3054 | * Ensure that the flash device is put back into read array mode before |
3055 | * unloading the driver or rebooting. On some systems, rebooting while |
3056 | * the flash is in query/program/erase mode will prevent the CPU from |
3057 | * fetching the bootloader code, requiring a hard reset or power cycle. |
3058 | */ |
3059 | static int cfi_amdstd_reset(struct mtd_info *mtd) |
3060 | { |
3061 | struct map_info *map = mtd->priv; |
3062 | struct cfi_private *cfi = map->fldrv_priv; |
3063 | int i, ret; |
3064 | struct flchip *chip; |
3065 | |
3066 | for (i = 0; i < cfi->numchips; i++) { |
3067 | |
3068 | chip = &cfi->chips[i]; |
3069 | |
3070 | mutex_lock(&chip->mutex); |
3071 | |
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3073 | if (!ret) { |
3074 | map_write(map, CMD(0xF0), chip->start); |
3075 | chip->state = FL_SHUTDOWN; |
			put_chip(map, chip, chip->start);
3077 | } |
3078 | |
		mutex_unlock(&chip->mutex);
3080 | } |
3081 | |
3082 | return 0; |
3083 | } |
3084 | |
3085 | |
3086 | static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, |
3087 | void *v) |
3088 | { |
3089 | struct mtd_info *mtd; |
3090 | |
3091 | mtd = container_of(nb, struct mtd_info, reboot_notifier); |
3092 | cfi_amdstd_reset(mtd); |
3093 | return NOTIFY_DONE; |
3094 | } |
3095 | |
3096 | |
3097 | static void cfi_amdstd_destroy(struct mtd_info *mtd) |
3098 | { |
3099 | struct map_info *map = mtd->priv; |
3100 | struct cfi_private *cfi = map->fldrv_priv; |
3101 | |
3102 | cfi_amdstd_reset(mtd); |
3103 | unregister_reboot_notifier(&mtd->reboot_notifier); |
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
3108 | } |
3109 | |
3110 | MODULE_LICENSE("GPL" ); |
3111 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al." ); |
3112 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips" ); |
3113 | MODULE_ALIAS("cfi_cmdset_0006" ); |
3114 | MODULE_ALIAS("cfi_cmdset_0701" ); |
3115 | |