/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

enum dax_access_mode {
	DAX_ACCESS,
	DAX_RECOVERY_WRITE,
};

struct dax_operations {
	/*
	 * direct_access: translate a device-relative logical page
	 * offset into an absolute physical pfn. Return the number of
	 * pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			enum dax_access_mode, void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* zero_page_range: required operation. Zero a range of pages. */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
	/*
	 * recovery_write: recover a poisoned range; provided by DAX
	 * device drivers capable of clearing poison.
	 */
	size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
			void *addr, size_t bytes, struct iov_iter *iter);
};
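
/*
 * Example: a minimal sketch of how a driver might back these operations.
 * The foo_* names and fields below are hypothetical driver internals,
 * not part of this API; only the operation signatures and the helpers
 * declared later in this header (dax_get_private() etc.) are real.
 *
 *	static long foo_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages,
 *			enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
 *	{
 *		struct foo_device *foo = dax_get_private(dax_dev);
 *
 *		// a real driver would also check for poison here and
 *		// honour DAX_RECOVERY_WRITE
 *		if (kaddr)
 *			*kaddr = foo->virt_addr + PFN_PHYS(pgoff);
 *		if (pfn)
 *			*pfn = phys_to_pfn_t(foo->phys_addr + PFN_PHYS(pgoff),
 *					foo->pfn_flags);
 *		return min_t(long, nr_pages, foo->nr_pages - pgoff);
 *	}
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access = foo_dax_direct_access,
 *		.zero_page_range = foo_dax_zero_page_range,
 *		.recovery_write = foo_dax_recovery_write,
 *	};
 */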

struct dax_holder_operations {
	/*
	 * notify_failure - forward a memory failure event to the holder
	 * of the dax device
	 * @dax_dev: the dax device to which the holder is attached
	 * @offset: offset on this dax device where the memory failure occurs
	 * @len: length of this memory failure event
	 * @flags: action flags for the memory failure handler
	 */
	int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
			u64 len, int mf_flags);
};
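
/*
 * Example: a filesystem that registers itself as the holder (see
 * fs_dax_get_by_bdev() below) can supply notify_failure so media errors
 * are reported to it as a device range rather than page by page.  A
 * hedged sketch; the foo_fs_* names are hypothetical:
 *
 *	static int foo_fs_notify_failure(struct dax_device *dax_dev,
 *			u64 offset, u64 len, int mf_flags)
 *	{
 *		struct super_block *sb = dax_holder(dax_dev);
 *
 *		// translate [offset, offset + len) to file ranges and
 *		// unmap/kill the affected mappings, or shut down
 *		return foo_fs_handle_media_error(sb, offset, len, mf_flags);
 *	}
 *
 *	static const struct dax_holder_operations foo_fs_dax_holder_ops = {
 *		.notify_failure = foo_fs_notify_failure,
 *	};
 */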

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_nocache(struct dax_device *dax_dev);
void set_dax_nomc(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline void *dax_holder(struct dax_device *dax_dev)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_nocache(struct dax_device *dax_dev)
{
}
static inline void set_dax_nomc(struct dax_device *dax_dev)
{
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	return 0;
}
#endif
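
/*
 * Typical provider lifecycle, sketched with hypothetical foo_* names
 * (error handling elided); only the alloc_dax()/set_dax_*()/kill_dax()/
 * put_dax() calls are part of this API:
 *
 *	dax_dev = alloc_dax(foo, &foo_dax_ops);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *	set_dax_nocache(dax_dev);
 *	if (foo_media_is_synchronous(foo))
 *		set_dax_synchronous(dax_dev);
 *	...
 *	// on teardown: invalidate the device, then drop the reference
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 *
 * daxdev_mapping_supported() is intended for a filesystem's ->mmap()
 * handler: a MAP_SYNC (VM_SYNC) mapping is only allowed when the
 * backing dax device is synchronous.
 */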

struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
		void *holder, const struct dax_holder_operations *ops);
void fs_put_dax(struct dax_device *dax_dev, void *holder);
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off, void *holder,
		const struct dax_holder_operations *ops)
{
	return NULL;
}
static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
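
/*
 * Example: how a filesystem might look up the dax device backing its
 * block device at mount time and register itself as the holder.  A
 * hedged sketch; sb and foo_fs_dax_holder_ops are hypothetical:
 *
 *	u64 start_off;
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &start_off, sb,
 *			&foo_fs_dax_holder_ops);
 *	if (!dax_dev)
 *		// fall back to non-dax (page cache) operation
 *
 *	// at unmount time
 *	fs_put_dax(dax_dev, sb);
 *
 * start_off receives the byte offset of @bdev within the dax device,
 * which the filesystem adds when translating file offsets into dax
 * device offsets.
 */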

#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_folio(struct folio *folio);
void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
	if (IS_DAX(folio->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}

static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
		unsigned long index, struct page **page)
{
	return 0;
}

static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		unsigned long index, dax_entry_t cookie)
{
}
#endif
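
/*
 * Example: before truncating or punching a hole in a DAX file, a
 * filesystem typically waits for pages pinned for DMA to be released.
 * A hedged sketch of that "break layouts" loop; foo_fs_wait_for_page()
 * is hypothetical:
 *
 *	struct page *page;
 *
 *	do {
 *		page = dax_layout_busy_page(inode->i_mapping);
 *		if (!page)
 *			break;
 *		error = foo_fs_wait_for_page(inode, page);
 *	} while (!error);
 *
 * dax_lock_folio()/dax_unlock_folio() are used by the memory-failure
 * path to stabilize the folio<->mapping association while a poisoned
 * page is handled; the cookie returned by the lock call must be passed
 * back to the matching unlock call.
 */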

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops);
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);
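
/*
 * Example: on truncate, a DAX filesystem zeroes the tail of the block
 * that now contains EOF so stale data is not exposed through mmap.  A
 * hedged sketch, assuming the filesystem supplies its own iomap_ops
 * (foo_fs_iomap_ops is hypothetical):
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, &did_zero,
 *				&foo_fs_iomap_ops);
 *
 * dax_zero_range() is the range-based variant used for hole-punch style
 * zeroing of partial blocks.
 */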

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
		int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
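
/*
 * Example: callers of dax_direct_access() must hold the dax read lock so
 * the device cannot be torn down (kill_dax()) underneath them.  A minimal
 * sketch that maps a single page for reading:
 *
 *	void *kaddr;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS,
 *			&kaddr, NULL);
 *	if (avail > 0)
 *		memcpy(buf, kaddr, PAGE_SIZE);
 *	dax_read_unlock(id);
 *
 * The return value is the number of contiguous pages available at @pgoff
 * or a negative errno; @kaddr and @pfn may individually be NULL when the
 * caller does not need that translation.
 */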

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dest, loff_t destoff,
		loff_t len, bool *is_same,
		const struct iomap_ops *ops);
int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out,
		loff_t *len, unsigned int remap_flags,
		const struct iomap_ops *ops);
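
/*
 * Example: the typical filesystem entry points into the DAX iomap
 * machinery.  A hedged sketch; foo_fs_iomap_ops and the surrounding
 * locking are the filesystem's own:
 *
 *	// ->read_iter()/->write_iter() for IS_DAX() inodes
 *	ret = dax_iomap_rw(iocb, iter, &foo_fs_iomap_ops);
 *
 *	// ->fault()/->huge_fault()
 *	result = dax_iomap_fault(vmf, order, &pfn, NULL, &foo_fs_iomap_ops);
 *	if (result & VM_FAULT_NEEDDSYNC)
 *		// MAP_SYNC mapping: complete the synchronous fault
 *		result = dax_finish_sync_fault(vmf, order, pfn);
 */
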
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

/*
 * Due to dax's dual memory and block personalities, hwpoison reporting
 * takes into consideration which personality is presently visible.
 * When dax acts like a block device, such as in block IO, an encounter of
 * dax hwpoison is reported as -EIO.
 * When dax acts like memory, such as in page fault, a detection of hwpoison
 * is reported as -EHWPOISON, which leads to VM_FAULT_HWPOISON.
 */
static inline int dax_mem2blk_err(int err)
{
	return (err == -EHWPOISON) ? -EIO : err;
}
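
/*
 * Example, mirroring how the block-I/O style paths in fs/dax.c report a
 * poisoned range to their callers:
 *
 *	rc = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
 *	if (rc < 0)
 *		return dax_mem2blk_err(rc);
 */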

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_resource(int target_nid, struct resource *r);
#else
static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif

typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
		const struct resource *res);
int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif