// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}
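
/*
 * For illustration, assuming the map entry layout in btt.h (Z flag in bit
 * 31, E flag in bit 30, post-map LBA in bits 29:0), the stored flag bits
 * work out to:
 *
 *   Z E  meaning
 *   0 0  initial/unwritten entry: identity mapping, postmap == premap
 *   0 1  'E' (error) flag set on the mapping
 *   1 0  'Z' (zero/trim) flag set on the mapping
 *   1 1  'normal' map entry (MAP_ENT_NORMAL)
 *
 * e.g. a normal mapping of premap LBA 5 to postmap LBA 9 would be stored
 * as cpu_to_le32(0xC0000000 | 9) at mapoff + (5 * MAP_ENT_SIZE).
 */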

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
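
/*
 * A short worked example of the sequence arithmetic above: sequence
 * numbers cycle 1 -> 2 -> 3 -> 1 (0 marks a never-used entry), so the
 * 'newer' of a pair is the one a single increment ahead, modulo the
 * wraparound. For (ent[idx0].seq, ent[idx1].seq):
 *   (0, *) -> first ever use: slot 0 is stamped with seq 1 and returned
 *   (2, 1) -> 2 follows 1, so slot 1 holds the older entry
 *   (1, 3) -> wraparound: 1 follows 3, so slot 1 is again the older one
 *   (2, 2), or any pair summing to more than 5 -> corrupt, -EINVAL
 */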

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_read_map is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
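/*
 * Concretely (per the log_group description in btt.h): the updated format
 * keeps its two valid entries in slots (0, 1) with padding in (2, 3),
 * while the older on-media format used slots (0, 2) with padding in
 * (1, 3). Either way slot 0 holds a valid entry, which is why only
 * (0, 1) and (0, 2) are accepted as index pairs below.
 */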
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}
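
/*
 * A quick sizing sketch of the split above, assuming BTT_PG_SIZE == SZ_4K,
 * BTT_DEFAULT_NFREE == 256, 16 byte log entries (LOG_GRP_SIZE == 64) and
 * 512 byte sectors (so internal_lbasize == 512): for a 1 GiB arena,
 *   available = SZ_1G - 2 * SZ_4K            (info block and its copy)
 *   logsize   = roundup(256 * 64, SZ_4K)     == SZ_16K (the flog)
 *   nlba      = (available - logsize - SZ_4K) / (512 + MAP_ENT_SIZE)
 * which works out to roughly 2.08 million internal blocks, 256 of which
 * are reserved for the free list (external_nlba = internal_nlba - nfree).
 */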

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
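
/*
 * As an example, with one of the integrity-capable lbasize choices such
 * as 4104, sector_size is 4096 and btt_meta_size() leaves 8 bytes of
 * per-sector integrity metadata, stored just past the data within each
 * internal block (see btt_rw_integrity() below).
 */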

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
			struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
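
/*
 * For example, with 64-byte cachelines and MAP_ENT_SIZE == 4, sixteen
 * consecutive map entries share a cacheline and hence hash to the same
 * lock index; the modulo by nfree then stripes those cachelines across
 * the available locks, so writers updating nearby premap LBAs serialize
 * their map read-modify-write while distant ones proceed in parallel.
 */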

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();


		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d,\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =	THIS_MODULE,
	.submit_bio =	btt_submit_bio,
	.getgeo =	btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	int rc = -ENOMEM;

	btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!btt->btt_disk)
		return -ENOMEM;

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);

	if (btt_meta_size(btt)) {
		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
		if (rc)
			goto out_cleanup_disk;
	}

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}
/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			u32 lbasize, uuid_t *uuid,
			struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);