// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock; the counter
 * value is stored in the volume identifier header. This means that each VID
 * header has a unique sequence number. The sequence number is only ever
 * increased, and we assume 64 bits is enough for it to never overflow.
 */
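
/*
 * Illustrative sketch (an assumption about how attach-time code can exploit
 * the sequence numbers described above, not code from this file): when two
 * PEBs claim the same LEB, the copy whose VID header carries the larger
 * sqnum is the newer one:
 *
 *	if (be64_to_cpu(new_vidh->sqnum) > be64_to_cpu(old_vidh->sqnum))
 *		... prefer the PEB holding new_vidh ...
 */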

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"

/**
 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
 * @pnum: the physical eraseblock number attached to the LEB
 *
 * This structure encodes a LEB -> PEB association. Note that the LEB
 * number is not stored here, because it is the index used to access the
 * entries table.
 */
struct ubi_eba_entry {
	int pnum;
};

/**
 * struct ubi_eba_table - LEB -> PEB association information
 * @entries: the LEB to PEB mapping (one entry per LEB).
 *
 * This structure is private to the EBA logic and should be kept here.
 * It encodes the LEB to PEB association table, and is subject to
 * change.
 */
struct ubi_eba_table {
	struct ubi_eba_entry *entries;
};

/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ubi_eba_get_ldesc - get information about a LEB
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @ldesc: the LEB descriptor to fill
 *
 * Used to query information about a specific LEB.
 * It currently only returns the physical position of the LEB, but may be
 * extended to provide more information.
 */
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
		       struct ubi_eba_leb_desc *ldesc)
{
	ldesc->lnum = lnum;
	ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}
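
/*
 * Illustrative usage sketch (hypothetical caller, not part of this driver):
 *
 *	struct ubi_eba_leb_desc ldesc;
 *
 *	ubi_eba_get_ldesc(vol, lnum, &ldesc);
 *	if (ldesc.pnum < 0)
 *		... the LEB is currently not mapped to any PEB ...
 */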

/**
 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
 *			  LEBs unmapped
 * @vol: volume the EBA table is created for
 * @nentries: number of entries in the table
 *
 * Allocate a new EBA table and initialize it with all LEBs unmapped.
 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
 */
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
					   int nentries)
{
	struct ubi_eba_table *tbl;
	int err = -ENOMEM;
	int i;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return ERR_PTR(-ENOMEM);

	tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
				     GFP_KERNEL);
	if (!tbl->entries)
		goto err;

	for (i = 0; i < nentries; i++)
		tbl->entries[i].pnum = UBI_LEB_UNMAPPED;

	return tbl;

err:
	kfree(tbl);

	return ERR_PTR(err);
}

/**
 * ubi_eba_destroy_table - destroy an EBA table
 * @tbl: the table to destroy
 *
 * Destroy an EBA table.
 */
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->entries);
	kfree(tbl);
}

/**
 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
 * @vol: volume containing the EBA table to copy
 * @dst: destination
 * @nentries: number of entries to copy
 *
 * Copy the EBA table stored in @vol into the one pointed to by @dst.
 */
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
			int nentries)
{
	struct ubi_eba_table *src;
	int i;

	ubi_assert(dst && vol && vol->eba_tbl);

	src = vol->eba_tbl;

	for (i = 0; i < nentries; i++)
		dst->entries[i].pnum = src->entries[i].pnum;
}

/**
 * ubi_eba_replace_table - assign a new EBA table to a volume
 * @vol: volume to assign the new EBA table to
 * @tbl: new EBA table
 *
 * Assign a new EBA table to the volume and release the old one.
 */
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
	ubi_eba_destroy_table(vol->eba_tbl);
	vol->eba_tbl = tbl;
}
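
/*
 * Illustrative sketch of how the three table helpers above compose, e.g.
 * when a volume is resized (hypothetical snippet, locking and error
 * handling elided):
 *
 *	new_tbl = ubi_eba_create_table(vol, new_reserved_pebs);
 *	if (IS_ERR(new_tbl))
 *		return PTR_ERR(new_tbl);
 *	ubi_eba_copy_table(vol, new_tbl, min(old_nentries, new_nentries));
 *	ubi_eba_replace_table(vol, new_tbl);
 */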

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or ERR_PTR(%-ENOMEM) if
 * memory allocation failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}
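
/*
 * Sketch of the pairing discipline the lock helpers above implement
 * (assumed caller, for illustration only):
 *
 *	err = leb_write_lock(ubi, vol_id, lnum);
 *	if (err)
 *		return err;
 *	... update the LEB -> PEB mapping and/or write the data ...
 *	leb_write_unlock(ubi, vol_id, lnum);
 *
 * Readers use leb_read_lock()/leb_read_unlock() the same way; the lock tree
 * entry is dropped from the RB-tree when its user count falls to zero.
 */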

/**
 * ubi_eba_is_mapped - check if a LEB is mapped.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function returns true if the LEB is mapped, false otherwise.
 */
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
	return vol->eba_tbl->entries[lnum].pnum >= 0;
}

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules corresponding
 * physical eraseblock for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_eba_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * check_mapping - check and fixup a mapping
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @pnum: physical eraseblock number
 *
 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
 * operations, so if such an operation is interrupted the mapping still looks
 * good, but upon first read an ECC error is reported to the upper layer.
 * Normally this is fixed during the full scan at attach time; with Fastmap
 * we have to deal with it while reading.
 * If the PEB behind a LEB shows this symptom, we change the mapping to
 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
 *
 * Returns 0 on success, negative error code in case of failure.
 */
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	int err;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (!ubi->fast_attach)
		return 0;

	if (!vol->checkmap || test_bit(lnum, vol->checkmap))
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
	if (err > 0 && err != UBI_IO_BITFLIPS) {
		int torture = 0;

		switch (err) {
		case UBI_IO_FF:
		case UBI_IO_FF_BITFLIPS:
		case UBI_IO_BAD_HDR:
		case UBI_IO_BAD_HDR_EBADMSG:
			break;
		default:
			ubi_assert(0);
		}

		if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
			torture = 1;

		down_read(&ubi->fm_eba_sem);
		vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
		up_read(&ubi->fm_eba_sem);
		ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);

		*pnum = UBI_LEB_UNMAPPED;
	} else if (err < 0) {
		ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
			*pnum, err);

		goto out_free;
	} else {
		int found_vol_id, found_lnum;

		ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);

		vid_hdr = ubi_get_vid_hdr(vidb);
		found_vol_id = be32_to_cpu(vid_hdr->vol_id);
		found_lnum = be32_to_cpu(vid_hdr->lnum);

		if (found_lnum != lnum || found_vol_id != vol->vol_id) {
			ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
				*pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
			ubi_ro_mode(ubi);
			err = -EINVAL;
			goto out_free;
		}
	}

	set_bit(lnum, vol->checkmap);
	err = 0;

out_free:
	ubi_free_vid_buf(vidb);

	return err;
}
#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
			 int *pnum)
{
	return 0;
}
#endif

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out_unlock;
	}

	if (pnum == UBI_LEB_UNMAPPED) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
		if (!vidb) {
			err = -ENOMEM;
			goto out_unlock;
		}

		vid_hdr = ubi_get_vid_hdr(vidb);

		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
					/*
					 * Ending up here in the non-Fastmap case
					 * is a clear bug as the VID header had to
					 * be present at scan time to have it referenced.
					 * With fastmap the story is more complicated.
					 * Fastmap has the mapping info without the need
					 * of a full scan. So the LEB could have been
					 * unmapped, Fastmap cannot know this and keeps
					 * the LEB referenced.
					 * This is valid and works as the layer above UBI
					 * has to do bookkeeping about used/referenced
					 * LEBs in any case.
					 */
					if (ubi->fast_attach) {
						err = -EBADMSG;
					} else {
						err = -EINVAL;
						ubi_ro_mode(ubi);
					}
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_buf(vidb);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			scrub = 1;
		else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg(ubi, "force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_buf(vidb);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
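
/*
 * Illustrative caller sketch (hypothetical, loosely mirroring what the
 * volume I/O layer does on top of this function):
 *
 *	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, 0);
 *	if (err == -EBADMSG)
 *		... ECC/CRC problem: the returned data is unreliable ...
 *	else if (err)
 *		... other I/O error ...
 */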

/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(), but instead of
 * storing the read data in a buffer it writes it to a UBI scatter gather
 * list.
 */
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
			struct ubi_sgl *sgl, int lnum, int offset, int len,
			int check)
{
	int to_read;
	int ret;
	struct scatterlist *sg;

	for (;;) {
		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
		sg = &sgl->sg[sgl->list_pos];
		if (len < sg->length - sgl->page_pos)
			to_read = len;
		else
			to_read = sg->length - sgl->page_pos;

		ret = ubi_eba_read_leb(ubi, vol, lnum,
				       sg_virt(sg) + sgl->page_pos, offset,
				       to_read, check);
		if (ret < 0)
			return ret;

		offset += to_read;
		len -= to_read;
		if (!len) {
			sgl->page_pos += to_read;
			if (sgl->page_pos == sg->length) {
				sgl->list_pos++;
				sgl->page_pos = 0;
			}

			break;
		}

		sgl->list_pos++;
		sgl->page_pos = 0;
	}

	return ret;
}

/**
 * try_recover_peb - try to recover from write failure.
 * @vol: volume description object
 * @pnum: the physical eraseblock to recover
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 * @vidb: VID buffer
 * @retry: whether the caller should retry in case of failure
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns 0 in case of success, and a negative error code in case of failure.
 * In case of failure, @retry is set to false if this is a fatal error
 * (retrying won't help), and true otherwise.
 */
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
			   const void *buf, int offset, int len,
			   struct ubi_vid_io_buf *vidb, bool *retry)
{
	struct ubi_device *ubi = vol->ubi;
	struct ubi_vid_hdr *vid_hdr;
	int new_pnum, err, vol_id = vol->vol_id, data_size;
	uint32_t crc;

	*retry = false;

	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		err = new_pnum;
		goto out_put;
	}

	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
		pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);
	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);

	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_unlock;
	}

	*retry = true;

	memcpy(ubi->peb_buf + offset, buf, len);

	data_size = offset + len;
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->copy_flag = 1;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->data_crc = cpu_to_be32(crc);
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
	if (err)
		goto out_unlock;

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);

out_unlock:
	mutex_unlock(&ubi->buf_mutex);

	if (!err)
		vol->eba_tbl->entries[lnum].pnum = new_pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (!err) {
		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
		ubi_msg(ubi, "data was successfully recovered");
	} else if (new_pnum >= 0) {
		/*
		 * Bad luck? This physical eraseblock is bad too? Crud. Let's
		 * try to get another one.
		 */
		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
	}

	return err;
}

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns 0 in case of success, and a negative error code in case of failure.
 * This function tries %UBI_IO_RETRIES times before giving up.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), tries;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_io_buf *vidb;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		bool retry;

		err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
				      &retry);
		if (!err || !retry)
			break;

		ubi_msg(ubi, "try again");
	}

	ubi_free_vid_buf(vidb);

	return err;
}

/**
 * try_write_vid_and_data - try to write VID header and data to a new PEB.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @vidb: the VID buffer to write
 * @buf: buffer containing the data
 * @offset: where to start writing data
 * @len: how many bytes should be written
 *
 * This function tries to write VID header and data belonging to logical
 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
 * in case of success and a negative error code in case of failure.
 * In case of error, it is possible that something was still written to the
 * flash media, but it may be just garbage.
 */
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
				  struct ubi_vid_io_buf *vidb, const void *buf,
				  int offset, int len)
{
	struct ubi_device *ubi = vol->ubi;
	int pnum, opnum, err, err2, vol_id = vol->vol_id;

	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		goto out_put;
	}

	opnum = vol->eba_tbl->entries[lnum].pnum;

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto out_put;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi,
				 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			goto out_put;
		}
	}

	vol->eba_tbl->entries[lnum].pnum = pnum;

out_put:
	up_read(&ubi->fm_eba_sem);

	if (err && pnum >= 0) {
		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
		if (err2) {
			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
				 pnum, err2);
		}
	} else if (!err && opnum >= 0) {
		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
		if (err2) {
			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
				 opnum, err2);
		}
	}

	return err;
}

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be just garbage.
 * This function retries %UBI_IO_RETRIES times before giving up.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl->entries[lnum].pnum;
	if (pnum >= 0) {
		err = check_mapping(ubi, vol, lnum, &pnum);
		if (err < 0)
			goto out;
	}

	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
		}

		goto out;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr = ubi_get_vid_hdr(vidb);

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		/*
		 * Fortunately, this is the first write operation to this
		 * physical eraseblock, so just put it and request a new one.
		 * We assume that if this physical eraseblock went bad, the
		 * erase code will handle that.
		 */
		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	ubi_free_vid_buf(vidb);

out:
	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

	return err;
}
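
/*
 * Illustrative caller sketch (hypothetical): write @len bytes at @offset of
 * LEB @lnum; the LEB is mapped on demand if it was unmapped:
 *
 *	err = ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
 *	if (err)
 *		... the device may have been switched to read-only mode ...
 */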

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
 * the real data size, although the @buf buffer has to contain the alignment
 * padding. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, tries, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

	ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out:
	ubi_free_vid_buf(vidb);

	return err;
}
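
/*
 * Worked example of the @len alignment rule above (illustrative numbers):
 * with min_io_size = 2048 and 1000 bytes of real data in the last LEB of a
 * static volume, data_size stays 1000 (and that is what goes into the VID
 * header and the CRC), while ALIGN(1000, 2048) = 2048 bytes are actually
 * written, so @buf must contain the 1048 bytes of padding as well.
 */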

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, tries, vol_id = vol->vol_id;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

	dbg_eba("change LEB %d:%d", vol_id, lnum);

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}

	/*
	 * This flash device does not admit of bad eraseblocks, or
	 * something nasty and unexpected happened. Switch to read-only
	 * mode just in case.
	 */
	if (err)
		ubi_ro_mode(ubi);

	leb_write_unlock(ubi, vol_id, lnum);

out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_buf(vidb);
	return err;
}
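
/*
 * Illustrative usage sketch (hypothetical caller): atomically replace the
 * contents of LEB @lnum with @len bytes from @buf:
 *
 *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 *
 * After an unclean reboot either the complete old or the complete new
 * contents is visible, never a mix of both.
 */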

/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
 * mode, simply because we do not know what happened at the MTD level, and we
 * cannot handle this. E.g., the underlying driver may have become crazy, and
 * it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written.
 */
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;
	return 1;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vidb: data structure from where the VID header is derived
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 *   o a negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_io_buf *vidb)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
	struct ubi_volume *vol;
	uint32_t crc;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it will
	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and would go to sleep on the LEB lock. So,
	 * if the LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reasons of the contention - it may be just a
	 * normal I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl->entries[lnum].pnum != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn(ubi, "error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because they may later be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vidb);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();
	}

	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);

	/*
	 * The volumes_lock lock is needed here to prevent the stale old
	 * eba_tbl from being updated while the eba_tbl is copied in the
	 * ubi_resize_volume() process.
	 */
	spin_lock(&ubi->volumes_lock);
	vol->eba_tbl->entries[lnum].pnum = to;
	spin_unlock(&ubi->volumes_lock);

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: UBI attach info object
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 * o if this is a new UBI image, then just print the warning
 * o if this is an UBI image which has already been used for some time, print
 *   a warning only if we can reserve less than 10% of the expected amount of
 *   reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 (262144) number is picked arbitrarily, just a reasonably
	 * large number to distinguish between newly flashed and used images.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn(ubi, "%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}
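
/*
 * Worked example for the 10% rule above (illustrative numbers): with
 * beb_rsvd_level = 40 the threshold is 40 / 10 = 4, so on an aged image the
 * warning is only printed once beb_rsvd_pebs has dropped to 4 or fewer.
 */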

/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() triggers.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc_array(vol->reserved_pebs,
					    sizeof(**scan_eba),
					    GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc_array(vol->reserved_pebs,
					  sizeof(**fm_eba),
					  GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		struct ubi_eba_table *tbl;

		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
		if (IS_ERR(tbl)) {
			err = PTR_ERR(tbl);
			goto out_free;
		}

		ubi_eba_replace_table(vol, tbl);

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs) {
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			} else {
				struct ubi_eba_entry *entry;

				entry = &vol->eba_tbl->entries[aeb->lnum];
				entry->pnum = aeb->pnum;
			}
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		ubi_eba_replace_table(ubi->volumes[i], NULL);
	}
	return err;
}