// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>


struct mtdblk_dev {
	struct mtd_blktrans_dev mbd;
	int count;			/* open reference count */
	struct mutex cache_mutex;	/* protects write-back of cache_data */
	unsigned char *cache_data;	/* one erase block worth of data */
	unsigned long cache_offset;	/* device offset of the cached sector */
	unsigned int cache_size;	/* erase block size, 0 if uncached */
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

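/*
 * Cache state machine: STATE_EMPTY means cache_data holds nothing valid,
 * STATE_CLEAN means it mirrors the flash contents at cache_offset, and
 * STATE_DIRTY means it has been modified and still needs an erase +
 * write back to flash.
 */
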
/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */

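/*
 * For example, with a 128 KiB erase block and the 512-byte blocks the
 * block layer uses, up to 256 consecutive block writes into the same
 * eraseblock are folded into a single erase + program cycle.
 */
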
static int erase_write(struct mtd_info *mtd, unsigned long pos,
		       unsigned int len, const char *buf)
{
	struct erase_info erase;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */
	erase.addr = pos;
	erase.len = len;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] on \"%s\" failed\n",
		       pos, len, mtd->name);
		return ret;
	}

	/*
	 * Next, write the data to flash.
	 */

	ret = mtd_write(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}


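/*
 * Write the dirty cache sector back to flash with an erase + program
 * cycle, then invalidate the cache (see the comment below for why it is
 * not simply marked clean).
 */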
static int write_cached_data(struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	pr_debug("mtdblock: writing cached data for \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write(mtd, mtdblk->cache_offset,
			  mtdblk->cache_size, mtdblk->cache_data);

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means. Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 *
	 * If this cache_offset points to a bad block, data cannot be
	 * written to the device. Clear cache_state to avoid writing to
	 * bad blocks repeatedly.
	 */
	if (ret == 0 || ret == -EIO)
		mtdblk->cache_state = STATE_EMPTY;
	return ret;
}


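/*
 * Write "len" bytes at device offset "pos": aligned whole-sector spans
 * are erased and programmed directly, while partial sectors are staged
 * through the one-sector cache (read-modify-write).
 */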
static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size)
		return mtd_write(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector. Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write(mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = mtd_read(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret && !mtd_is_bitflip(ret))
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy(mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}


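/*
 * Read "len" bytes at device offset "pos", serving the request from the
 * write cache whenever it holds the sector being read.
 */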
static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
			  int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
		 mtd->name, pos, len);

	if (!sect_size) {
		ret = mtd_read(mtd, pos, len, &retlen, buf);
		if (ret && !mtd_is_bitflip(ret))
			return ret;
		return 0;
	}

	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached: serve the
		 * request from our internal cache if it holds the wanted
		 * sector, otherwise read the data directly from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy(buf, mtdblk->cache_data + offset, size);
		} else {
			ret = mtd_read(mtd, pos, size, &retlen, buf);
			if (ret && !mtd_is_bitflip(ret))
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

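/*
 * The common blktrans layer presents the device in 512-byte blocks (the
 * blksize set in mtdblock_tr below), so "block << 9" converts a block
 * number into a byte offset on the underlying MTD device.
 */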
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			     unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	return do_cached_read(mtdblk, block << 9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

	/* Allocate the sector cache lazily, on the first write */
	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
		mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
		/*
		 * -EINTR is not really correct, but it is the best match
		 * documented in man 2 write for all cases. We could also
		 * return -EAGAIN sometimes, but why bother?
		 */
		if (!mtdblk->cache_data)
			return -EINTR;
	}
	return do_cached_write(mtdblk, block << 9, 512, buf);
}

static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_open\n");

	if (mtdblk->count) {
		mtdblk->count++;
		return 0;
	}

	if (mtd_type_is_nand(mbd->mtd))
		pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
				    mbd->tr->name, mbd->mtd->name);

	/* OK, it's not open. Create cache info for it */
	mtdblk->count = 1;
	mutex_init(&mtdblk->cache_mutex);
	mtdblk->cache_state = STATE_EMPTY;
	/* Only cache writes on devices that actually need erase cycles */
	if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
		mtdblk->cache_size = mbd->mtd->erasesize;
		mtdblk->cache_data = NULL;
	}

	pr_debug("ok\n");

	return 0;
}

static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_release\n");

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/*
		 * It was the last usage. Free the cache, but only sync if
		 * opened for writing.
		 */
		if (mbd->writable)
			mtd_sync(mbd->mtd);
		vfree(mtdblk->cache_data);
	}

	pr_debug("ok\n");
}

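/*
 * Write back any dirty cached sector and sync the underlying MTD device;
 * the common blktrans layer calls this for block-layer flush requests
 * (REQ_OP_FLUSH).
 */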
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	int ret;

	mutex_lock(&mtdblk->cache_mutex);
	ret = write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);
	mtd_sync(dev->mtd);
	return ret;
}

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mbd.mtd = mtd;
	dev->mbd.devnum = mtd->index;

	dev->mbd.size = mtd->size >> 9;
	dev->mbd.tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->mbd.readonly = 1;

	if (add_mtd_blktrans_dev(&dev->mbd))
		kfree(dev);
}

static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

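/*
 * One block device per MTD device: 512-byte blocks and no minor bits
 * reserved for block-layer partitions (MTD partitions show up as
 * separate MTD devices instead).
 */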
static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= MTD_BLOCK_MAJOR,
	.part_bits	= 0,
	.blksize	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};

module_mtd_blktrans(mtdblock_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");