// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/kstrtox.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
	return pmem->phys_addr + offset;
}

static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
{
	return (offset - pmem->data_offset) >> SECTOR_SHIFT;
}

static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
{
	return (sector << SECTOR_SHIFT) + pmem->data_offset;
}

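/*
 * After poison has been cleared in hardware, restore the kernel's view of
 * the affected pages: clear the per-page poison flag and re-establish the
 * direct-map entries that the machine check handler had unmapped.
 */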
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

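/*
 * Drop @blks sectors starting at @sector from the badblocks list and, if the
 * 'badblocks' sysfs dirent is wired up, notify userspace of the change.
 */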
static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
	if (blks == 0)
		return;
	badblocks_clear(&pmem->bb, sector, blks);
	if (pmem->bb_state)
		sysfs_notify_dirent(pmem->bb_state);
}

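/*
 * Ask the nvdimm bus to clear poison for @len bytes at @offset. On success,
 * make the affected pages accessible again and invalidate the CPU caches
 * over the range so stale poisoned lines are not consumed. Returns the
 * number of bytes cleared, or a negative error code.
 */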
static long __pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);

	if (cleared > 0) {
		pmem_mkpage_present(pmem, offset, cleared);
		arch_invalidate_pmem(pmem->virt_addr + offset, len);
	}
	return cleared;
}

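/*
 * Block-layer wrapper around __pmem_clear_poison(): also drops the
 * corresponding badblocks entries and fails the request if the full
 * range could not be cleared.
 */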
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	long cleared = __pmem_clear_poison(pmem, offset, len);

	if (cleared < 0)
		return BLK_STS_IOERR;

	pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
	if (cleared < len)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}

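/*
 * Copy from a (potentially highmem) page into pmem using cache-flushing
 * stores so the data is durable without a separate cache flush pass.
 */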
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

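/*
 * Copy from pmem into a page with a machine-check safe copy so that
 * consuming poison is reported as an I/O error rather than crashing.
 */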
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

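/* Read one segment, failing early if it overlaps known badblocks. */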
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

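/*
 * Write one segment. If the target range has known badblocks, first try to
 * clear the poison; propagate the error if that fails.
 */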
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
		blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);

		if (rc != BLK_STS_OK)
			return rc;
	}

	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);

	return BLK_STS_OK;
}

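/*
 * bio submission path: honor REQ_PREFLUSH and REQ_FUA by flushing the
 * nvdimm write-pending queues, account the I/O, and copy each segment
 * to or from the persistent memory mapping.
 */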
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}

242 | /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ |
243 | __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, |
244 | long nr_pages, enum dax_access_mode mode, void **kaddr, |
245 | pfn_t *pfn) |
246 | { |
247 | resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset; |
248 | sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; |
249 | unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT; |
250 | struct badblocks *bb = &pmem->bb; |
251 | sector_t first_bad; |
252 | int num_bad; |
253 | |
254 | if (kaddr) |
255 | *kaddr = pmem->virt_addr + offset; |
256 | if (pfn) |
257 | *pfn = phys_to_pfn_t(addr: pmem->phys_addr + offset, flags: pmem->pfn_flags); |
258 | |
259 | if (bb->count && |
260 | badblocks_check(bb, s: sector, sectors: num, first_bad: &first_bad, bad_sectors: &num_bad)) { |
261 | long actual_nr; |
262 | |
263 | if (mode != DAX_RECOVERY_WRITE) |
264 | return -EHWPOISON; |
265 | |
266 | /* |
267 | * Set the recovery stride is set to kernel page size because |
268 | * the underlying driver and firmware clear poison functions |
269 | * don't appear to handle large chunk(such as 2MiB) reliably. |
270 | */ |
271 | actual_nr = PHYS_PFN( |
272 | PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT)); |
273 | dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n" , |
274 | sector, nr_pages, first_bad, actual_nr); |
275 | if (actual_nr) |
276 | return actual_nr; |
277 | return 1; |
278 | } |
279 | |
280 | /* |
281 | * If badblocks are present but not in the range, limit known good range |
282 | * to the requested range. |
283 | */ |
284 | if (bb->count) |
285 | return nr_pages; |
286 | return PHYS_PFN(pmem->size - pmem->pfn_pad - offset); |
287 | } |

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.submit_bio = pmem_submit_bio,
};

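/*
 * DAX zero path: reuse the regular write path with the zero page so that
 * known-poisoned ranges are cleared as a side effect of the write.
 */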
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
		void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}

/*
 * The recovery write thread started out as a normal pwrite thread and,
 * when the filesystem was told about a potential media error in the
 * range, the filesystem turns the normal pwrite into a dax_recovery_write.
 *
 * The recovery write consists of clearing media poison, clearing the page
 * HWPoison bit, re-enabling page-wide read-write permission, flushing the
 * caches and finally writing. A competing pread thread will be held
 * off during the recovery process since data read back might not be
 * valid, and this is achieved by clearing the badblock records after
 * the recovery write is complete. Competing recovery write threads
 * are already serialized by the writer lock held by dax_iomap_rw().
 */
static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);
	size_t olen, len, off;
	phys_addr_t pmem_off;
	struct device *dev = pmem->bb.dev;
	long cleared;

	off = offset_in_page(addr);
	len = PFN_PHYS(PFN_UP(off + bytes));
	if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
		return _copy_from_iter_flushcache(addr, bytes, i);

	/*
	 * A range that is not page-aligned cannot be recovered. This should
	 * not happen unless something else went wrong.
	 */
	if (off || !PAGE_ALIGNED(bytes)) {
		dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
				addr, bytes);
		return 0;
	}

	pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
	cleared = __pmem_clear_poison(pmem, pmem_off, len);
	if (cleared > 0 && cleared < len) {
		dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
				cleared, len);
		return 0;
	}
	if (cleared < 0) {
		dev_dbg(dev, "poison clear failed: %ld\n", cleared);
		return 0;
	}

	olen = _copy_from_iter_flushcache(addr, bytes, i);
	pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);

	return olen;
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.zero_page_range = pmem_dax_zero_page_range,
	.recovery_write = pmem_recovery_write,
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = kstrtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);
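/*
 * The attribute is exposed under the disk's "dax" group; for a disk named
 * pmem0 (name illustrative) the write-cache flushing policy can be toggled
 * with: echo 0 > /sys/block/pmem0/dax/write_cache
 */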

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	dax_remove_host(pmem->disk);
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	put_disk(pmem->disk);
}

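/*
 * HWPoison / memory-failure callback for the fsdax pagemap: translate the
 * failing pfn range into a byte offset within the namespace data area and
 * forward it to the dax holder (typically a filesystem).
 */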
static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	struct pmem_device *pmem =
		container_of(pgmap, struct pmem_device, pgmap);
	u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
	u64 len = nr_pages << PAGE_SHIFT;

	return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.memory_failure = pmem_pagemap_memory_failure,
};

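/*
 * Main setup path: map the namespace (with struct pages when DAX is
 * possible), configure the request queue, populate badblocks, register
 * the dax_device and finally add the gendisk.
 */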
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(nid);
	if (!disk)
		return -ENOMEM;
	q = disk->queue;

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, &pmem_dax_ops);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto out;
	}
	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);
	if (is_nvdimm_sync(nd_region))
		set_dax_synchronous(dax_dev);
	rc = dax_add_host(dax_dev, disk);
	if (rc)
		goto out_cleanup_dax;
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_remove_host;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;

out_remove_host:
	dax_remove_host(pmem->disk);
out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	put_disk(pmem->disk);
	return rc;
}

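/*
 * Probe order: a btt or pfn personality claims the namespace directly;
 * otherwise probe for btt, pfn and dax info blocks in turn and fall back
 * to attaching a raw pmem disk.
 */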
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no info
	 * reserve block, or we found a valid info reserve block but failed
	 * to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

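/*
 * Re-populate the badblocks list for the namespace (adjusting for any pfn
 * metadata start_pad/end_trunc) when the region reports new poison, and
 * poke the 'badblocks' sysfs attribute if one is registered.
 */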
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

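/* Re-evaluate the read-only state of the backing disk when the region changes. */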
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");