// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);

	/*
	 * Clear dev so mtd can be safely re-registered later if desired.
	 * Should not be done for partition,
	 * as it was already destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
	static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
	static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check that first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	kref_init(&mtd->refcnt);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or 1 on failure, which currently will happen
 * if the requested device does not appear to be present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = true;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique serial
			 * number and is small, so let's read it out and put it
			 * into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function falls back to the information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
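
/*
 * Usage sketch (illustrative, not part of this file): a flash driver that
 * has just filled in its mtd_info would typically register it as below,
 * optionally providing a fallback partition table that is used when no
 * parser finds anything. The "mydrv" names are hypothetical.
 *
 *	static const struct mtd_partition mydrv_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = mtd_device_parse_register(mtd, NULL, NULL, mydrv_parts,
 *					ARRAY_SIZE(mydrv_parts));
 *	if (err)
 *		return err;
 */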

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
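
/*
 * Usage sketch (illustrative): an MTD 'user' such as a translation layer
 * supplies add/remove callbacks in a struct mtd_notifier and registers it
 * once at init time; the add callback then fires immediately for every
 * device already present. The "myuser" names are hypothetical.
 *
 *	static void myuser_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void myuser_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d is going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier myuser_notifier = {
 *		.add = myuser_add,
 *		.remove = myuser_remove,
 *	};
 *
 *	register_mtd_user(&myuser_notifier);
 */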

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return it if it's still present. Given
 * both, return the num'th driver only if its address matches. Return an
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
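
/*
 * Usage sketch (illustrative): look up device number 0 by index, use it,
 * then drop the reference. A successful get_mtd_device() must always be
 * balanced by put_mtd_device().
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	pr_info("opened %s (%llu bytes)\n", mtd->name,
 *		(unsigned long long)mtd->size);
 *	put_mtd_device(mtd);
 */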


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd) {
		/* kref_put() can release mtd, so keep a reference to mtd->parent */
		struct mtd_info *parent = mtd->parent;

		if (mtd != master)
			kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be the last as master can be freed in the _put_device */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
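
/*
 * Usage sketch (illustrative): erase the first eraseblock of a device.
 * On failure, instr.fail_addr (when not MTD_FAIL_ADDR_UNKNOWN) points at
 * the offset that could not be erased.
 *
 *	struct erase_info instr = {
 *		.addr = 0,
 *		.len = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &instr);
 *
 *	if (err)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)instr.fail_addr);
 */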

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
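
/*
 * Usage sketch (illustrative): a simple write followed by a read-back.
 * Both helpers report the number of bytes actually transferred through
 * *retlen, which callers should verify; a -EUCLEAN read is corrected
 * data, not a failure.
 *
 *	size_t retlen;
 *	int err = mtd_write(mtd, 0, len, &retlen, wbuf);
 *
 *	if (err || retlen != len)
 *		return err ? err : -EIO;
 *	err = mtd_read(mtd, 0, len, &retlen, rbuf);
 *	if (err && !mtd_is_bitflip(err))
 *		return err;
 */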

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
1538 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
1539 | const u_char *buf) |
1540 | { |
1541 | struct mtd_info *master = mtd_get_master(mtd); |
1542 | |
1543 | *retlen = 0; |
1544 | if (!master->_panic_write) |
1545 | return -EOPNOTSUPP; |
1546 | if (to < 0 || to >= mtd->size || len > mtd->size - to) |
1547 | return -EINVAL; |
1548 | if (!(mtd->flags & MTD_WRITEABLE)) |
1549 | return -EROFS; |
1550 | if (!len) |
1551 | return 0; |
1552 | if (!master->oops_panic_write) |
1553 | master->oops_panic_write = true; |
1554 | |
1555 | return master->_panic_write(master, mtd_get_master_ofs(mtd, ofs: to), len, |
1556 | retlen, buf); |
1557 | } |
1558 | EXPORT_SYMBOL_GPL(mtd_panic_write); |
1559 | |
1560 | static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, |
1561 | struct mtd_oob_ops *ops) |
1562 | { |
1563 | /* |
1564 | * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving |
1565 | * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in |
1566 | * this case. |
1567 | */ |
1568 | if (!ops->datbuf) |
1569 | ops->len = 0; |
1570 | |
1571 | if (!ops->oobbuf) |
1572 | ops->ooblen = 0; |
1573 | |
1574 | if (offs < 0 || offs + ops->len > mtd->size) |
1575 | return -EINVAL; |
1576 | |
1577 | if (ops->ooblen) { |
1578 | size_t maxooblen; |
1579 | |
1580 | if (ops->ooboffs >= mtd_oobavail(mtd, ops)) |
1581 | return -EINVAL; |
1582 | |
1583 | maxooblen = ((size_t)(mtd_div_by_ws(sz: mtd->size, mtd) - |
1584 | mtd_div_by_ws(sz: offs, mtd)) * |
1585 | mtd_oobavail(mtd, ops)) - ops->ooboffs; |
1586 | if (ops->ooblen > maxooblen) |
1587 | return -EINVAL; |
1588 | } |
1589 | |
1590 | return 0; |
1591 | } |
1592 | |
1593 | static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from, |
1594 | struct mtd_oob_ops *ops) |
1595 | { |
1596 | struct mtd_info *master = mtd_get_master(mtd); |
1597 | int ret; |
1598 | |
1599 | from = mtd_get_master_ofs(mtd, ofs: from); |
1600 | if (master->_read_oob) |
1601 | ret = master->_read_oob(master, from, ops); |
1602 | else |
1603 | ret = master->_read(master, from, ops->len, &ops->retlen, |
1604 | ops->datbuf); |
1605 | |
1606 | return ret; |
1607 | } |
1608 | |
1609 | static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to, |
1610 | struct mtd_oob_ops *ops) |
1611 | { |
1612 | struct mtd_info *master = mtd_get_master(mtd); |
1613 | int ret; |
1614 | |
1615 | to = mtd_get_master_ofs(mtd, ofs: to); |
1616 | if (master->_write_oob) |
1617 | ret = master->_write_oob(master, to, ops); |
1618 | else |
1619 | ret = master->_write(master, to, ops->len, &ops->retlen, |
1620 | ops->datbuf); |
1621 | |
1622 | return ret; |
1623 | } |
1624 | |
1625 | static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read, |
1626 | struct mtd_oob_ops *ops) |
1627 | { |
1628 | struct mtd_info *master = mtd_get_master(mtd); |
1629 | int ngroups = mtd_pairing_groups(master); |
1630 | int npairs = mtd_wunit_per_eb(mtd: master) / ngroups; |
1631 | struct mtd_oob_ops adjops = *ops; |
1632 | unsigned int wunit, oobavail; |
1633 | struct mtd_pairing_info info; |
1634 | int max_bitflips = 0; |
1635 | u32 ebofs, pageofs; |
1636 | loff_t base, pos; |
1637 | |
1638 | ebofs = mtd_mod_by_eb(sz: start, mtd); |
1639 | base = (loff_t)mtd_div_by_eb(sz: start, mtd) * master->erasesize; |
1640 | info.group = 0; |
1641 | info.pair = mtd_div_by_ws(sz: ebofs, mtd); |
1642 | pageofs = mtd_mod_by_ws(sz: ebofs, mtd); |
1643 | oobavail = mtd_oobavail(mtd, ops); |
1644 | |
1645 | while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { |
1646 | int ret; |
1647 | |
1648 | if (info.pair >= npairs) { |
1649 | info.pair = 0; |
1650 | base += master->erasesize; |
1651 | } |
1652 | |
1653 | wunit = mtd_pairing_info_to_wunit(master, &info); |
1654 | pos = mtd_wunit_to_offset(mtd, base, wunit); |
1655 | |
1656 | adjops.len = ops->len - ops->retlen; |
1657 | if (adjops.len > mtd->writesize - pageofs) |
1658 | adjops.len = mtd->writesize - pageofs; |
1659 | |
1660 | adjops.ooblen = ops->ooblen - ops->oobretlen; |
1661 | if (adjops.ooblen > oobavail - adjops.ooboffs) |
1662 | adjops.ooblen = oobavail - adjops.ooboffs; |
1663 | |
1664 | if (read) { |
1665 | ret = mtd_read_oob_std(mtd, from: pos + pageofs, ops: &adjops); |
1666 | if (ret > 0) |
1667 | max_bitflips = max(max_bitflips, ret); |
1668 | } else { |
1669 | ret = mtd_write_oob_std(mtd, to: pos + pageofs, ops: &adjops); |
1670 | } |
1671 | |
1672 | if (ret < 0) |
1673 | return ret; |
1674 | |
1675 | max_bitflips = max(max_bitflips, ret); |
1676 | ops->retlen += adjops.retlen; |
1677 | ops->oobretlen += adjops.oobretlen; |
1678 | adjops.datbuf += adjops.retlen; |
1679 | adjops.oobbuf += adjops.oobretlen; |
1680 | adjops.ooboffs = 0; |
1681 | pageofs = 0; |
1682 | info.pair++; |
1683 | } |
1684 | |
1685 | return max_bitflips; |
1686 | } |
1687 | |
1688 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) |
1689 | { |
1690 | struct mtd_info *master = mtd_get_master(mtd); |
1691 | struct mtd_ecc_stats old_stats = master->ecc_stats; |
1692 | int ret_code; |
1693 | |
1694 | ops->retlen = ops->oobretlen = 0; |
1695 | |
1696 | ret_code = mtd_check_oob_ops(mtd, offs: from, ops); |
1697 | if (ret_code) |
1698 | return ret_code; |
1699 | |
1700 | ledtrig_mtd_activity(); |
1701 | |
1702 | /* Check the validity of a potential fallback on mtd->_read */ |
1703 | if (!master->_read_oob && (!master->_read || ops->oobbuf)) |
1704 | return -EOPNOTSUPP; |
1705 | |
1706 | if (ops->stats) |
1707 | memset(ops->stats, 0, sizeof(*ops->stats)); |
1708 | |
1709 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
1710 | ret_code = mtd_io_emulated_slc(mtd, start: from, read: true, ops); |
1711 | else |
1712 | ret_code = mtd_read_oob_std(mtd, from, ops); |
1713 | |
1714 | mtd_update_ecc_stats(mtd, master, old_stats: &old_stats); |
1715 | |
1716 | /* |
1717 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics |
1718 | * similar to mtd->_read(), returning a non-negative integer |
1719 | * representing max bitflips. In other cases, mtd->_read_oob() may |
1720 | * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). |
1721 | */ |
1722 | if (unlikely(ret_code < 0)) |
1723 | return ret_code; |
1724 | if (mtd->ecc_strength == 0) |
1725 | return 0; /* device lacks ecc */ |
1726 | if (ops->stats) |
1727 | ops->stats->max_bitflips = ret_code; |
1728 | return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; |
1729 | } |
1730 | EXPORT_SYMBOL_GPL(mtd_read_oob); |
1731 | |
1732 | int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
1733 | struct mtd_oob_ops *ops) |
1734 | { |
1735 | struct mtd_info *master = mtd_get_master(mtd); |
1736 | int ret; |
1737 | |
1738 | ops->retlen = ops->oobretlen = 0; |
1739 | |
1740 | if (!(mtd->flags & MTD_WRITEABLE)) |
1741 | return -EROFS; |
1742 | |
ret = mtd_check_oob_ops(mtd, to, ops);
1744 | if (ret) |
1745 | return ret; |
1746 | |
1747 | ledtrig_mtd_activity(); |
1748 | |
1749 | /* Check the validity of a potential fallback on mtd->_write */ |
1750 | if (!master->_write_oob && (!master->_write || ops->oobbuf)) |
1751 | return -EOPNOTSUPP; |
1752 | |
1753 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
return mtd_io_emulated_slc(mtd, to, false, ops);
1755 | |
1756 | return mtd_write_oob_std(mtd, to, ops); |
1757 | } |
1758 | EXPORT_SYMBOL_GPL(mtd_write_oob); |
1759 | |
1760 | /** |
1761 | * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section |
1762 | * @mtd: MTD device structure |
1763 | * @section: ECC section. Depending on the layout you may have all the ECC |
1764 | * bytes stored in a single contiguous section, or one section |
* per ECC chunk (and sometimes several sections for a single ECC
* chunk)
1767 | * @oobecc: OOB region struct filled with the appropriate ECC position |
1768 | * information |
1769 | * |
1770 | * This function returns ECC section information in the OOB area. If you want |
1771 | * to get all the ECC bytes information, then you should call |
1772 | * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE. |
1773 | * |
1774 | * Returns zero on success, a negative error code otherwise. |
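*
* A minimal iteration sketch (all variables are the caller's own; -ERANGE
* only marks the end of the layout):
*
*	struct mtd_oob_region oobecc;
*	int section = 0, ret;
*
*	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &oobecc)))
*		pr_info("ECC bytes: offset %u, length %u\n",
*			oobecc.offset, oobecc.length);
*	if (ret != -ERANGE)
*		return ret;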
1775 | */ |
1776 | int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, |
1777 | struct mtd_oob_region *oobecc) |
1778 | { |
1779 | struct mtd_info *master = mtd_get_master(mtd); |
1780 | |
1781 | memset(oobecc, 0, sizeof(*oobecc)); |
1782 | |
1783 | if (!master || section < 0) |
1784 | return -EINVAL; |
1785 | |
1786 | if (!master->ooblayout || !master->ooblayout->ecc) |
1787 | return -ENOTSUPP; |
1788 | |
1789 | return master->ooblayout->ecc(master, section, oobecc); |
1790 | } |
1791 | EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); |
1792 | |
1793 | /** |
1794 | * mtd_ooblayout_free - Get the OOB region definition of a specific free |
1795 | * section |
1796 | * @mtd: MTD device structure |
1797 | * @section: Free section you are interested in. Depending on the layout |
1798 | * you may have all the free bytes stored in a single contiguous |
1799 | * section, or one section per ECC chunk plus an extra section |
1800 | * for the remaining bytes (or other funky layout). |
1801 | * @oobfree: OOB region struct filled with the appropriate free position |
1802 | * information |
1803 | * |
1804 | * This function returns free bytes position in the OOB area. If you want |
1805 | * to get all the free bytes information, then you should call |
1806 | * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE. |
1807 | * |
1808 | * Returns zero on success, a negative error code otherwise. |
1809 | */ |
1810 | int mtd_ooblayout_free(struct mtd_info *mtd, int section, |
1811 | struct mtd_oob_region *oobfree) |
1812 | { |
1813 | struct mtd_info *master = mtd_get_master(mtd); |
1814 | |
1815 | memset(oobfree, 0, sizeof(*oobfree)); |
1816 | |
1817 | if (!master || section < 0) |
1818 | return -EINVAL; |
1819 | |
1820 | if (!master->ooblayout || !master->ooblayout->free) |
1821 | return -ENOTSUPP; |
1822 | |
1823 | return master->ooblayout->free(master, section, oobfree); |
1824 | } |
1825 | EXPORT_SYMBOL_GPL(mtd_ooblayout_free); |
1826 | |
1827 | /** |
1828 | * mtd_ooblayout_find_region - Find the region attached to a specific byte |
1829 | * @mtd: mtd info structure |
1830 | * @byte: the byte we are searching for |
1831 | * @sectionp: pointer where the section id will be stored |
1832 | * @oobregion: used to retrieve the ECC position |
1833 | * @iter: iterator function. Should be either mtd_ooblayout_free or |
1834 | * mtd_ooblayout_ecc depending on the region type you're searching for |
1835 | * |
1836 | * This function returns the section id and oobregion information of a |
* specific byte. For example, to find out where the 4th ECC byte is
* stored, you would use:
1839 | * |
* mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1841 | * |
1842 | * Returns zero on success, a negative error code otherwise. |
1843 | */ |
1844 | static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, |
1845 | int *sectionp, struct mtd_oob_region *oobregion, |
1846 | int (*iter)(struct mtd_info *, |
1847 | int section, |
1848 | struct mtd_oob_region *oobregion)) |
1849 | { |
1850 | int pos = 0, ret, section = 0; |
1851 | |
1852 | memset(oobregion, 0, sizeof(*oobregion)); |
1853 | |
1854 | while (1) { |
1855 | ret = iter(mtd, section, oobregion); |
1856 | if (ret) |
1857 | return ret; |
1858 | |
1859 | if (pos + oobregion->length > byte) |
1860 | break; |
1861 | |
1862 | pos += oobregion->length; |
1863 | section++; |
1864 | } |
1865 | |
1866 | /* |
* Adjust the region info so that it starts exactly at the requested
* 'byte' position.
1869 | */ |
1870 | oobregion->offset += byte - pos; |
1871 | oobregion->length -= byte - pos; |
1872 | *sectionp = section; |
1873 | |
1874 | return 0; |
1875 | } |
1876 | |
1877 | /** |
1878 | * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific |
1879 | * ECC byte |
1880 | * @mtd: mtd info structure |
1881 | * @eccbyte: the byte we are searching for |
1882 | * @section: pointer where the section id will be stored |
1883 | * @oobregion: OOB region information |
1884 | * |
1885 | * Works like mtd_ooblayout_find_region() except it searches for a specific ECC |
1886 | * byte. |
1887 | * |
1888 | * Returns zero on success, a negative error code otherwise. |
1889 | */ |
1890 | int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, |
1891 | int *section, |
1892 | struct mtd_oob_region *oobregion) |
1893 | { |
return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
mtd_ooblayout_ecc);
1896 | } |
1897 | EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion); |
1898 | |
1899 | /** |
1900 | * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer |
1901 | * @mtd: mtd info structure |
1902 | * @buf: destination buffer to store OOB bytes |
1903 | * @oobbuf: OOB buffer |
1904 | * @start: first byte to retrieve |
1905 | * @nbytes: number of bytes to retrieve |
1906 | * @iter: section iterator |
1907 | * |
1908 | * Extract bytes attached to a specific category (ECC or free) |
1909 | * from the OOB buffer and copy them into buf. |
1910 | * |
1911 | * Returns zero on success, a negative error code otherwise. |
1912 | */ |
1913 | static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf, |
1914 | const u8 *oobbuf, int start, int nbytes, |
1915 | int (*iter)(struct mtd_info *, |
1916 | int section, |
1917 | struct mtd_oob_region *oobregion)) |
1918 | { |
1919 | struct mtd_oob_region oobregion; |
1920 | int section, ret; |
1921 | |
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
1924 | |
1925 | while (!ret) { |
1926 | int cnt; |
1927 | |
1928 | cnt = min_t(int, nbytes, oobregion.length); |
1929 | memcpy(buf, oobbuf + oobregion.offset, cnt); |
1930 | buf += cnt; |
1931 | nbytes -= cnt; |
1932 | |
1933 | if (!nbytes) |
1934 | break; |
1935 | |
1936 | ret = iter(mtd, ++section, &oobregion); |
1937 | } |
1938 | |
1939 | return ret; |
1940 | } |
1941 | |
1942 | /** |
1943 | * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer |
1944 | * @mtd: mtd info structure |
1945 | * @buf: source buffer to get OOB bytes from |
1946 | * @oobbuf: OOB buffer |
1947 | * @start: first OOB byte to set |
1948 | * @nbytes: number of OOB bytes to set |
1949 | * @iter: section iterator |
1950 | * |
1951 | * Fill the OOB buffer with data provided in buf. The category (ECC or free) |
1952 | * is selected by passing the appropriate iterator. |
1953 | * |
1954 | * Returns zero on success, a negative error code otherwise. |
1955 | */ |
1956 | static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf, |
1957 | u8 *oobbuf, int start, int nbytes, |
1958 | int (*iter)(struct mtd_info *, |
1959 | int section, |
1960 | struct mtd_oob_region *oobregion)) |
1961 | { |
1962 | struct mtd_oob_region oobregion; |
1963 | int section, ret; |
1964 | |
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
1967 | |
1968 | while (!ret) { |
1969 | int cnt; |
1970 | |
1971 | cnt = min_t(int, nbytes, oobregion.length); |
1972 | memcpy(oobbuf + oobregion.offset, buf, cnt); |
1973 | buf += cnt; |
1974 | nbytes -= cnt; |
1975 | |
1976 | if (!nbytes) |
1977 | break; |
1978 | |
1979 | ret = iter(mtd, ++section, &oobregion); |
1980 | } |
1981 | |
1982 | return ret; |
1983 | } |
1984 | |
1985 | /** |
1986 | * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category |
1987 | * @mtd: mtd info structure |
1988 | * @iter: category iterator |
1989 | * |
1990 | * Count the number of bytes in a given category. |
1991 | * |
1992 | * Returns a positive value on success, a negative error code otherwise. |
1993 | */ |
1994 | static int mtd_ooblayout_count_bytes(struct mtd_info *mtd, |
1995 | int (*iter)(struct mtd_info *, |
1996 | int section, |
1997 | struct mtd_oob_region *oobregion)) |
1998 | { |
1999 | struct mtd_oob_region oobregion; |
2000 | int section = 0, ret, nbytes = 0; |
2001 | |
2002 | while (1) { |
2003 | ret = iter(mtd, section++, &oobregion); |
2004 | if (ret) { |
2005 | if (ret == -ERANGE) |
2006 | ret = nbytes; |
2007 | break; |
2008 | } |
2009 | |
2010 | nbytes += oobregion.length; |
2011 | } |
2012 | |
2013 | return ret; |
2014 | } |
2015 | |
2016 | /** |
2017 | * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer |
2018 | * @mtd: mtd info structure |
2019 | * @eccbuf: destination buffer to store ECC bytes |
2020 | * @oobbuf: OOB buffer |
2021 | * @start: first ECC byte to retrieve |
2022 | * @nbytes: number of ECC bytes to retrieve |
2023 | * |
2024 | * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes. |
2025 | * |
2026 | * Returns zero on success, a negative error code otherwise. |
2027 | */ |
2028 | int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, |
2029 | const u8 *oobbuf, int start, int nbytes) |
2030 | { |
return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
mtd_ooblayout_ecc);
2033 | } |
2034 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes); |
2035 | |
2036 | /** |
2037 | * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer |
2038 | * @mtd: mtd info structure |
2039 | * @eccbuf: source buffer to get ECC bytes from |
2040 | * @oobbuf: OOB buffer |
2041 | * @start: first ECC byte to set |
2042 | * @nbytes: number of ECC bytes to set |
2043 | * |
2044 | * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes. |
2045 | * |
2046 | * Returns zero on success, a negative error code otherwise. |
2047 | */ |
2048 | int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, |
2049 | u8 *oobbuf, int start, int nbytes) |
2050 | { |
return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
mtd_ooblayout_ecc);
2053 | } |
2054 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes); |
2055 | |
2056 | /** |
2057 | * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer |
2058 | * @mtd: mtd info structure |
* @databuf: destination buffer to store data bytes
* @oobbuf: OOB buffer
* @start: first data byte to retrieve
* @nbytes: number of data bytes to retrieve
2063 | * |
2064 | * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. |
2065 | * |
2066 | * Returns zero on success, a negative error code otherwise. |
2067 | */ |
2068 | int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, |
2069 | const u8 *oobbuf, int start, int nbytes) |
2070 | { |
return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
mtd_ooblayout_free);
2073 | } |
2074 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); |
2075 | |
2076 | /** |
2077 | * mtd_ooblayout_set_databytes - set data bytes into the oob buffer |
2078 | * @mtd: mtd info structure |
2079 | * @databuf: source buffer to get data bytes from |
2080 | * @oobbuf: OOB buffer |
* @start: first data byte to set
* @nbytes: number of data bytes to set
2083 | * |
2084 | * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes. |
2085 | * |
2086 | * Returns zero on success, a negative error code otherwise. |
2087 | */ |
2088 | int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, |
2089 | u8 *oobbuf, int start, int nbytes) |
2090 | { |
return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
mtd_ooblayout_free);
2093 | } |
2094 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes); |
2095 | |
2096 | /** |
2097 | * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB |
2098 | * @mtd: mtd info structure |
2099 | * |
* Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
*
* Returns the number of free bytes on success, a negative error code
* otherwise.
2103 | */ |
2104 | int mtd_ooblayout_count_freebytes(struct mtd_info *mtd) |
2105 | { |
return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2107 | } |
2108 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes); |
2109 | |
2110 | /** |
2111 | * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB |
2112 | * @mtd: mtd info structure |
2113 | * |
* Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
*
* Returns the number of ECC bytes on success, a negative error code
* otherwise.
2117 | */ |
2118 | int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd) |
2119 | { |
return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2121 | } |
2122 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); |
2123 | |
2124 | /* |
2125 | * Method to access the protection register area, present in some flash |
* devices. The user data is one-time programmable but the factory data is
* read only.
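*
* For example, dumping the first bytes of the user OTP area could look
* like this (a sketch; the buffer name and size are arbitrary):
*
*	size_t retlen;
*	u8 id[16];
*	int ret;
*
*	ret = mtd_read_user_prot_reg(mtd, 0, sizeof(id), &retlen, id);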
2128 | */ |
2129 | int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
2130 | struct otp_info *buf) |
2131 | { |
2132 | struct mtd_info *master = mtd_get_master(mtd); |
2133 | |
2134 | if (!master->_get_fact_prot_info) |
2135 | return -EOPNOTSUPP; |
2136 | if (!len) |
2137 | return 0; |
2138 | return master->_get_fact_prot_info(master, len, retlen, buf); |
2139 | } |
2140 | EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); |
2141 | |
2142 | int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
2143 | size_t *retlen, u_char *buf) |
2144 | { |
2145 | struct mtd_info *master = mtd_get_master(mtd); |
2146 | |
2147 | *retlen = 0; |
2148 | if (!master->_read_fact_prot_reg) |
2149 | return -EOPNOTSUPP; |
2150 | if (!len) |
2151 | return 0; |
2152 | return master->_read_fact_prot_reg(master, from, len, retlen, buf); |
2153 | } |
2154 | EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); |
2155 | |
2156 | int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
2157 | struct otp_info *buf) |
2158 | { |
2159 | struct mtd_info *master = mtd_get_master(mtd); |
2160 | |
2161 | if (!master->_get_user_prot_info) |
2162 | return -EOPNOTSUPP; |
2163 | if (!len) |
2164 | return 0; |
2165 | return master->_get_user_prot_info(master, len, retlen, buf); |
2166 | } |
2167 | EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); |
2168 | |
2169 | int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
2170 | size_t *retlen, u_char *buf) |
2171 | { |
2172 | struct mtd_info *master = mtd_get_master(mtd); |
2173 | |
2174 | *retlen = 0; |
2175 | if (!master->_read_user_prot_reg) |
2176 | return -EOPNOTSUPP; |
2177 | if (!len) |
2178 | return 0; |
2179 | return master->_read_user_prot_reg(master, from, len, retlen, buf); |
2180 | } |
2181 | EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); |
2182 | |
2183 | int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, |
2184 | size_t *retlen, const u_char *buf) |
2185 | { |
2186 | struct mtd_info *master = mtd_get_master(mtd); |
2187 | int ret; |
2188 | |
2189 | *retlen = 0; |
2190 | if (!master->_write_user_prot_reg) |
2191 | return -EOPNOTSUPP; |
2192 | if (!len) |
2193 | return 0; |
2194 | ret = master->_write_user_prot_reg(master, to, len, retlen, buf); |
2195 | if (ret) |
2196 | return ret; |
2197 | |
2198 | /* |
* If no data could be written at all, the OTP area is exhausted and we
* must return -ENOSPC.
2201 | */ |
2202 | return (*retlen) ? 0 : -ENOSPC; |
2203 | } |
2204 | EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); |
2205 | |
2206 | int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) |
2207 | { |
2208 | struct mtd_info *master = mtd_get_master(mtd); |
2209 | |
2210 | if (!master->_lock_user_prot_reg) |
2211 | return -EOPNOTSUPP; |
2212 | if (!len) |
2213 | return 0; |
2214 | return master->_lock_user_prot_reg(master, from, len); |
2215 | } |
2216 | EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); |
2217 | |
2218 | int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) |
2219 | { |
2220 | struct mtd_info *master = mtd_get_master(mtd); |
2221 | |
2222 | if (!master->_erase_user_prot_reg) |
2223 | return -EOPNOTSUPP; |
2224 | if (!len) |
2225 | return 0; |
2226 | return master->_erase_user_prot_reg(master, from, len); |
2227 | } |
2228 | EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg); |
2229 | |
2230 | /* Chip-supported device locking */ |
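/*
* These return -EOPNOTSUPP when the chip provides no locking. A typical
* flow (a sketch, error handling elided) unlocks a region before writing
* it and re-locks it afterwards:
*
*	mtd_unlock(mtd, ofs, len);
*	mtd_write(mtd, ofs, len, &retlen, buf);
*	mtd_lock(mtd, ofs, len);
*/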
2231 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2232 | { |
2233 | struct mtd_info *master = mtd_get_master(mtd); |
2234 | |
2235 | if (!master->_lock) |
2236 | return -EOPNOTSUPP; |
2237 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2238 | return -EINVAL; |
2239 | if (!len) |
2240 | return 0; |
2241 | |
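/*
* In emulated-SLC mode the emulated erase block is smaller than the
* physical one, so convert the emulated block index back into an offset
* on the master device (one physical block per emulated block).
*/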
2242 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2245 | } |
2246 | |
2247 | return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); |
2248 | } |
2249 | EXPORT_SYMBOL_GPL(mtd_lock); |
2250 | |
2251 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2252 | { |
2253 | struct mtd_info *master = mtd_get_master(mtd); |
2254 | |
2255 | if (!master->_unlock) |
2256 | return -EOPNOTSUPP; |
2257 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2258 | return -EINVAL; |
2259 | if (!len) |
2260 | return 0; |
2261 | |
2262 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2265 | } |
2266 | |
2267 | return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); |
2268 | } |
2269 | EXPORT_SYMBOL_GPL(mtd_unlock); |
2270 | |
2271 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2272 | { |
2273 | struct mtd_info *master = mtd_get_master(mtd); |
2274 | |
2275 | if (!master->_is_locked) |
2276 | return -EOPNOTSUPP; |
2277 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2278 | return -EINVAL; |
2279 | if (!len) |
2280 | return 0; |
2281 | |
2282 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2285 | } |
2286 | |
2287 | return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); |
2288 | } |
2289 | EXPORT_SYMBOL_GPL(mtd_is_locked); |
2290 | |
2291 | int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) |
2292 | { |
2293 | struct mtd_info *master = mtd_get_master(mtd); |
2294 | |
2295 | if (ofs < 0 || ofs >= mtd->size) |
2296 | return -EINVAL; |
2297 | if (!master->_block_isreserved) |
2298 | return 0; |
2299 | |
2300 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2302 | |
2303 | return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); |
2304 | } |
2305 | EXPORT_SYMBOL_GPL(mtd_block_isreserved); |
2306 | |
2307 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) |
2308 | { |
2309 | struct mtd_info *master = mtd_get_master(mtd); |
2310 | |
2311 | if (ofs < 0 || ofs >= mtd->size) |
2312 | return -EINVAL; |
2313 | if (!master->_block_isbad) |
2314 | return 0; |
2315 | |
2316 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2318 | |
2319 | return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); |
2320 | } |
2321 | EXPORT_SYMBOL_GPL(mtd_block_isbad); |
2322 | |
2323 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) |
2324 | { |
2325 | struct mtd_info *master = mtd_get_master(mtd); |
2326 | int ret; |
2327 | |
2328 | if (!master->_block_markbad) |
2329 | return -EOPNOTSUPP; |
2330 | if (ofs < 0 || ofs >= mtd->size) |
2331 | return -EINVAL; |
2332 | if (!(mtd->flags & MTD_WRITEABLE)) |
2333 | return -EROFS; |
2334 | |
2335 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2337 | |
2338 | ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); |
2339 | if (ret) |
2340 | return ret; |
2341 | |
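/* Propagate the new bad block into the ECC stats of every parent. */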
2342 | while (mtd->parent) { |
2343 | mtd->ecc_stats.badblocks++; |
2344 | mtd = mtd->parent; |
2345 | } |
2346 | |
2347 | return 0; |
2348 | } |
2349 | EXPORT_SYMBOL_GPL(mtd_block_markbad); |
2350 | |
2351 | /* |
2352 | * default_mtd_writev - the default writev method |
2353 | * @mtd: mtd device description object pointer |
2354 | * @vecs: the vectors to write |
2355 | * @count: count of vectors in @vecs |
2356 | * @to: the MTD device offset to write to |
2357 | * @retlen: on exit contains the count of bytes written to the MTD device. |
2358 | * |
2359 | * This function returns zero in case of success and a negative error code in |
2360 | * case of failure. |
2361 | */ |
2362 | static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, |
2363 | unsigned long count, loff_t to, size_t *retlen) |
2364 | { |
2365 | unsigned long i; |
2366 | size_t totlen = 0, thislen; |
2367 | int ret = 0; |
2368 | |
2369 | for (i = 0; i < count; i++) { |
2370 | if (!vecs[i].iov_len) |
2371 | continue; |
2372 | ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, |
2373 | vecs[i].iov_base); |
2374 | totlen += thislen; |
2375 | if (ret || thislen != vecs[i].iov_len) |
2376 | break; |
2377 | to += vecs[i].iov_len; |
2378 | } |
2379 | *retlen = totlen; |
2380 | return ret; |
2381 | } |
2382 | |
2383 | /* |
2384 | * mtd_writev - the vector-based MTD write method |
2385 | * @mtd: mtd device description object pointer |
2386 | * @vecs: the vectors to write |
2387 | * @count: count of vectors in @vecs |
2388 | * @to: the MTD device offset to write to |
2389 | * @retlen: on exit contains the count of bytes written to the MTD device. |
2390 | * |
2391 | * This function returns zero in case of success and a negative error code in |
2392 | * case of failure. |
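*
* Gathering a header and a payload into a single write might look like
* this (a sketch; 'hdr' and 'data' are caller-provided buffers):
*
*	struct kvec vecs[2] = {
*		{ .iov_base = hdr, .iov_len = hdr_len },
*		{ .iov_base = data, .iov_len = data_len },
*	};
*	size_t retlen;
*
*	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);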
2393 | */ |
2394 | int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, |
2395 | unsigned long count, loff_t to, size_t *retlen) |
2396 | { |
2397 | struct mtd_info *master = mtd_get_master(mtd); |
2398 | |
2399 | *retlen = 0; |
2400 | if (!(mtd->flags & MTD_WRITEABLE)) |
2401 | return -EROFS; |
2402 | |
2403 | if (!master->_writev) |
2404 | return default_mtd_writev(mtd, vecs, count, to, retlen); |
2405 | |
2406 | return master->_writev(master, vecs, count, |
mtd_get_master_ofs(mtd, to), retlen);
2408 | } |
2409 | EXPORT_SYMBOL_GPL(mtd_writev); |
2410 | |
2411 | /** |
2412 | * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size |
2413 | * @mtd: mtd device description object pointer |
2414 | * @size: a pointer to the ideal or maximum size of the allocation, points |
2415 | * to the actual allocation size on success. |
2416 | * |
2417 | * This routine attempts to allocate a contiguous kernel buffer up to |
2418 | * the specified size, backing off the size of the request exponentially |
2419 | * until the request succeeds or until the allocation size falls below |
2420 | * the system page size. This attempts to make sure it does not adversely |
2421 | * impact system performance, so when allocating more than one page, we |
2422 | * ask the memory allocator to avoid re-trying, swapping, writing back |
2423 | * or performing I/O. |
2424 | * |
2425 | * Note, this function also makes sure that the allocated buffer is aligned to |
2426 | * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. |
2427 | * |
* This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2429 | * to handle smaller (i.e. degraded) buffer allocations under low- or |
2430 | * fragmented-memory situations where such reduced allocations, from a |
2431 | * requested ideal, are allowed. |
2432 | * |
2433 | * Returns a pointer to the allocated buffer on success; otherwise, NULL. |
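*
* A typical call pattern (a sketch; on success *size is updated to the
* size actually obtained):
*
*	size_t size = mtd->erasesize;
*	void *buf = mtd_kmalloc_up_to(mtd, &size);
*
*	if (!buf)
*		return -ENOMEM;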
2434 | */ |
2435 | void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) |
2436 | { |
2437 | gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY; |
2438 | size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); |
2439 | void *kbuf; |
2440 | |
2441 | *size = min_t(size_t, *size, KMALLOC_MAX_SIZE); |
2442 | |
2443 | while (*size > min_alloc) { |
kbuf = kmalloc(*size, flags);
2445 | if (kbuf) |
2446 | return kbuf; |
2447 | |
2448 | *size >>= 1; |
2449 | *size = ALIGN(*size, mtd->writesize); |
2450 | } |
2451 | |
2452 | /* |
2453 | * For the last resort allocation allow 'kmalloc()' to do all sorts of |
2454 | * things (write-back, dropping caches, etc) by using GFP_KERNEL. |
2455 | */ |
return kmalloc(*size, GFP_KERNEL);
2457 | } |
2458 | EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); |
2459 | |
2460 | #ifdef CONFIG_PROC_FS |
2461 | |
2462 | /*====================================================================*/ |
2463 | /* Support for /proc/mtd */ |
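/*
* Each registered device yields one line, e.g. (the device shown is only
* an illustration):
*
*	dev:    size   erasesize  name
*	mtd0: 01000000 00020000 "u-boot-env"
*/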
2464 | |
2465 | static int mtd_proc_show(struct seq_file *m, void *v) |
2466 | { |
2467 | struct mtd_info *mtd; |
2468 | |
seq_puts(m, "dev:    size   erasesize  name\n");
2470 | mutex_lock(&mtd_table_mutex); |
2471 | mtd_for_each_device(mtd) { |
seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2473 | mtd->index, (unsigned long long)mtd->size, |
2474 | mtd->erasesize, mtd->name); |
2475 | } |
mutex_unlock(&mtd_table_mutex);
2477 | return 0; |
2478 | } |
2479 | #endif /* CONFIG_PROC_FS */ |
2480 | |
2481 | /*====================================================================*/ |
2482 | /* Init code */ |
2483 | |
2484 | static struct backing_dev_info * __init mtd_bdi_init(const char *name) |
2485 | { |
2486 | struct backing_dev_info *bdi; |
2487 | int ret; |
2488 | |
2489 | bdi = bdi_alloc(NUMA_NO_NODE); |
2490 | if (!bdi) |
return ERR_PTR(-ENOMEM);
2492 | bdi->ra_pages = 0; |
2493 | bdi->io_pages = 0; |
2494 | |
2495 | /* |
* We append a '-0' suffix to the name to keep the same name format as we
2497 | * used to get. Since this is called only once, we get a unique name. |
2498 | */ |
ret = bdi_register(bdi, "%.28s-0", name);
2500 | if (ret) |
2501 | bdi_put(bdi); |
2502 | |
return ret ? ERR_PTR(ret) : bdi;
2504 | } |
2505 | |
2506 | static struct proc_dir_entry *proc_mtd; |
2507 | |
2508 | static int __init init_mtd(void) |
2509 | { |
2510 | int ret; |
2511 | |
ret = class_register(&mtd_class);
2513 | if (ret) |
2514 | goto err_reg; |
2515 | |
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
2519 | goto err_bdi; |
2520 | } |
2521 | |
proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2523 | |
2524 | ret = init_mtdchar(); |
2525 | if (ret) |
2526 | goto out_procfs; |
2527 | |
dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
&mtd_expert_analysis_mode);
2531 | |
2532 | return 0; |
2533 | |
2534 | out_procfs: |
2535 | if (proc_mtd) |
remove_proc_entry("mtd", NULL);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
2539 | err_bdi: |
class_unregister(&mtd_class);
2541 | err_reg: |
pr_err("Error registering mtd class or bdi: %d\n", ret);
2543 | return ret; |
2544 | } |
2545 | |
2546 | static void __exit cleanup_mtd(void) |
2547 | { |
debugfs_remove_recursive(dfs_dir_mtd);
2549 | cleanup_mtdchar(); |
2550 | if (proc_mtd) |
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
2555 | idr_destroy(&mtd_idr); |
2556 | } |
2557 | |
2558 | module_init(init_mtd); |
2559 | module_exit(cleanup_mtd); |
2560 | |
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");
2564 | |