1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * include/linux/buffer_head.h |
4 | * |
5 | * Everything to do with buffer_heads. |
6 | */ |
7 | |
8 | #ifndef _LINUX_BUFFER_HEAD_H |
9 | #define _LINUX_BUFFER_HEAD_H |
10 | |
11 | #include <linux/types.h> |
12 | #include <linux/fs.h> |
13 | #include <linux/linkage.h> |
14 | #include <linux/pagemap.h> |
15 | #include <linux/wait.h> |
16 | #include <linux/atomic.h> |
17 | |
18 | #ifdef CONFIG_BLOCK |
19 | |
20 | enum bh_state_bits { |
21 | BH_Uptodate, /* Contains valid data */ |
22 | BH_Dirty, /* Is dirty */ |
23 | BH_Lock, /* Is locked */ |
24 | BH_Req, /* Has been submitted for I/O */ |
25 | BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise |
26 | * IO completion of other buffers in the page |
27 | */ |
28 | |
29 | BH_Mapped, /* Has a disk mapping */ |
30 | BH_New, /* Disk mapping was newly created by get_block */ |
31 | BH_Async_Read, /* Is under end_buffer_async_read I/O */ |
32 | BH_Async_Write, /* Is under end_buffer_async_write I/O */ |
33 | BH_Delay, /* Buffer is not yet allocated on disk */ |
34 | BH_Boundary, /* Block is followed by a discontiguity */ |
35 | BH_Write_EIO, /* I/O error on write */ |
36 | BH_Unwritten, /* Buffer is allocated on disk but not written */ |
	BH_Quiet,	/* Buffer Error Printks to be quiet */
38 | BH_Meta, /* Buffer contains metadata */ |
39 | BH_Prio, /* Buffer should be submitted with REQ_PRIO */ |
40 | BH_Defer_Completion, /* Defer AIO completion to workqueue */ |
41 | |
42 | BH_PrivateStart,/* not a state bit, but the first bit available |
43 | * for private allocation by other entities |
44 | */ |
45 | }; |
46 | |
47 | #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512) |
48 | |
49 | struct page; |
50 | struct buffer_head; |
51 | struct address_space; |
52 | typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); |
53 | |
54 | /* |
55 | * Historically, a buffer_head was used to map a single block |
56 | * within a page, and of course as the unit of I/O through the |
57 | * filesystem and block layers. Nowadays the basic I/O unit |
58 | * is the bio, and buffer_heads are used for extracting block |
59 | * mappings (via a get_block_t call), for tracking state within |
60 | * a page (via a page_mapping) and for wrapping bio submission |
61 | * for backward compatibility reasons (e.g. submit_bh). |
62 | */ |
63 | struct buffer_head { |
64 | unsigned long b_state; /* buffer state bitmap (see above) */ |
65 | struct buffer_head *b_this_page;/* circular list of page's buffers */ |
66 | struct page *b_page; /* the page this bh is mapped to */ |
67 | |
68 | sector_t b_blocknr; /* start block number */ |
69 | size_t b_size; /* size of mapping */ |
70 | char *b_data; /* pointer to data within the page */ |
71 | |
72 | struct block_device *b_bdev; |
73 | bh_end_io_t *b_end_io; /* I/O completion */ |
74 | void *b_private; /* reserved for b_end_io */ |
75 | struct list_head b_assoc_buffers; /* associated with another mapping */ |
76 | struct address_space *b_assoc_map; /* mapping this buffer is |
77 | associated with */ |
78 | atomic_t b_count; /* users using this buffer_head */ |
79 | }; |
80 | |
81 | /* |
82 | * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() |
83 | * and buffer_foo() functions. |
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
86 | */ |
87 | #define BUFFER_FNS(bit, name) \ |
88 | static __always_inline void set_buffer_##name(struct buffer_head *bh) \ |
89 | { \ |
90 | if (!test_bit(BH_##bit, &(bh)->b_state)) \ |
91 | set_bit(BH_##bit, &(bh)->b_state); \ |
92 | } \ |
93 | static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ |
94 | { \ |
95 | clear_bit(BH_##bit, &(bh)->b_state); \ |
96 | } \ |
97 | static __always_inline int buffer_##name(const struct buffer_head *bh) \ |
98 | { \ |
99 | return test_bit(BH_##bit, &(bh)->b_state); \ |
100 | } |
101 | |
102 | /* |
103 | * test_set_buffer_foo() and test_clear_buffer_foo() |
104 | */ |
105 | #define TAS_BUFFER_FNS(bit, name) \ |
106 | static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \ |
107 | { \ |
108 | return test_and_set_bit(BH_##bit, &(bh)->b_state); \ |
109 | } \ |
110 | static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ |
111 | { \ |
112 | return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ |
113 | } \ |
114 | |
115 | /* |
116 | * Emit the buffer bitops functions. Note that there are also functions |
117 | * of the form "mark_buffer_foo()". These are higher-level functions which |
118 | * do something in addition to setting a b_state bit. |
119 | */ |
120 | BUFFER_FNS(Uptodate, uptodate) |
121 | BUFFER_FNS(Dirty, dirty) |
122 | TAS_BUFFER_FNS(Dirty, dirty) |
123 | BUFFER_FNS(Lock, locked) |
124 | BUFFER_FNS(Req, req) |
125 | TAS_BUFFER_FNS(Req, req) |
126 | BUFFER_FNS(Mapped, mapped) |
127 | BUFFER_FNS(New, new) |
128 | BUFFER_FNS(Async_Read, async_read) |
129 | BUFFER_FNS(Async_Write, async_write) |
130 | BUFFER_FNS(Delay, delay) |
131 | BUFFER_FNS(Boundary, boundary) |
132 | BUFFER_FNS(Write_EIO, write_io_error) |
133 | BUFFER_FNS(Unwritten, unwritten) |
134 | BUFFER_FNS(Meta, meta) |
135 | BUFFER_FNS(Prio, prio) |
136 | BUFFER_FNS(Defer_Completion, defer_completion) |
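
/*
 * For example, BUFFER_FNS(Dirty, dirty) and TAS_BUFFER_FNS(Dirty, dirty)
 * above expand to roughly the following accessors:
 *
 *	void set_buffer_dirty(struct buffer_head *bh);
 *	void clear_buffer_dirty(struct buffer_head *bh);
 *	int  buffer_dirty(const struct buffer_head *bh);
 *	int  test_set_buffer_dirty(struct buffer_head *bh);
 *	int  test_clear_buffer_dirty(struct buffer_head *bh);
 */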
137 | |
138 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) |
139 | |
140 | /* If we *know* page->private refers to buffer_heads */ |
141 | #define page_buffers(page) \ |
142 | ({ \ |
143 | BUG_ON(!PagePrivate(page)); \ |
144 | ((struct buffer_head *)page_private(page)); \ |
145 | }) |
146 | #define page_has_buffers(page) PagePrivate(page) |
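
/*
 * A buffer_head-backed filesystem typically makes sure a page has buffers
 * attached and then walks the circular b_this_page list.  Sketch only (the
 * block size and the per-buffer work are up to the caller):
 *
 *	struct buffer_head *head, *bh;
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = bh = page_buffers(page);
 *	do {
 *		... examine or map bh here ...
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */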
147 | |
148 | void buffer_check_dirty_writeback(struct page *page, |
149 | bool *dirty, bool *writeback); |
150 | |
151 | /* |
152 | * Declarations |
153 | */ |
154 | |
155 | void mark_buffer_dirty(struct buffer_head *bh); |
156 | void mark_buffer_write_io_error(struct buffer_head *bh); |
157 | void touch_buffer(struct buffer_head *bh); |
158 | void set_bh_page(struct buffer_head *bh, |
159 | struct page *page, unsigned long offset); |
160 | int try_to_free_buffers(struct page *); |
161 | struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, |
162 | bool retry); |
163 | void create_empty_buffers(struct page *, unsigned long, |
164 | unsigned long b_state); |
165 | void end_buffer_read_sync(struct buffer_head *bh, int uptodate); |
166 | void end_buffer_write_sync(struct buffer_head *bh, int uptodate); |
167 | void end_buffer_async_write(struct buffer_head *bh, int uptodate); |
168 | |
169 | /* Things to do with buffers at mapping->private_list */ |
170 | void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); |
171 | int inode_has_buffers(struct inode *); |
172 | void invalidate_inode_buffers(struct inode *); |
173 | int remove_inode_buffers(struct inode *inode); |
174 | int sync_mapping_buffers(struct address_space *mapping); |
175 | void clean_bdev_aliases(struct block_device *bdev, sector_t block, |
176 | sector_t len); |
177 | static inline void clean_bdev_bh_alias(struct buffer_head *bh) |
178 | { |
179 | clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); |
180 | } |
181 | |
182 | void mark_buffer_async_write(struct buffer_head *bh); |
183 | void __wait_on_buffer(struct buffer_head *); |
184 | wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); |
185 | struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, |
186 | unsigned size); |
187 | struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, |
188 | unsigned size, gfp_t gfp); |
189 | void __brelse(struct buffer_head *); |
190 | void __bforget(struct buffer_head *); |
191 | void __breadahead(struct block_device *, sector_t block, unsigned int size); |
192 | struct buffer_head *__bread_gfp(struct block_device *, |
193 | sector_t block, unsigned size, gfp_t gfp); |
194 | void invalidate_bh_lrus(void); |
195 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); |
196 | void free_buffer_head(struct buffer_head * bh); |
197 | void unlock_buffer(struct buffer_head *bh); |
198 | void __lock_buffer(struct buffer_head *bh); |
199 | void ll_rw_block(int, int, int, struct buffer_head * bh[]); |
200 | int sync_dirty_buffer(struct buffer_head *bh); |
201 | int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); |
202 | void write_dirty_buffer(struct buffer_head *bh, int op_flags); |
203 | int submit_bh(int, int, struct buffer_head *); |
204 | void write_boundary_block(struct block_device *bdev, |
205 | sector_t bblock, unsigned blocksize); |
206 | int bh_uptodate_or_lock(struct buffer_head *bh); |
207 | int bh_submit_read(struct buffer_head *bh); |
208 | |
209 | extern int buffer_heads_over_limit; |
210 | |
211 | /* |
212 | * Generic address_space_operations implementations for buffer_head-backed |
213 | * address_spaces. |
214 | */ |
215 | void block_invalidatepage(struct page *page, unsigned int offset, |
216 | unsigned int length); |
217 | int block_write_full_page(struct page *page, get_block_t *get_block, |
218 | struct writeback_control *wbc); |
219 | int __block_write_full_page(struct inode *inode, struct page *page, |
220 | get_block_t *get_block, struct writeback_control *wbc, |
221 | bh_end_io_t *handler); |
222 | int block_read_full_page(struct page*, get_block_t*); |
223 | int block_is_partially_uptodate(struct page *page, unsigned long from, |
224 | unsigned long count); |
225 | int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, |
226 | unsigned flags, struct page **pagep, get_block_t *get_block); |
227 | int __block_write_begin(struct page *page, loff_t pos, unsigned len, |
228 | get_block_t *get_block); |
229 | int block_write_end(struct file *, struct address_space *, |
230 | loff_t, unsigned, unsigned, |
231 | struct page *, void *); |
232 | int generic_write_end(struct file *, struct address_space *, |
233 | loff_t, unsigned, unsigned, |
234 | struct page *, void *); |
235 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
236 | void clean_page_buffers(struct page *page); |
237 | int cont_write_begin(struct file *, struct address_space *, loff_t, |
238 | unsigned, unsigned, struct page **, void **, |
239 | get_block_t *, loff_t *); |
240 | int generic_cont_expand_simple(struct inode *inode, loff_t size); |
241 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
242 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
243 | get_block_t get_block); |
244 | /* Convert errno to return value from ->page_mkwrite() call */ |
245 | static inline vm_fault_t block_page_mkwrite_return(int err) |
246 | { |
247 | if (err == 0) |
248 | return VM_FAULT_LOCKED; |
249 | if (err == -EFAULT || err == -EAGAIN) |
250 | return VM_FAULT_NOPAGE; |
251 | if (err == -ENOMEM) |
252 | return VM_FAULT_OOM; |
253 | /* -ENOSPC, -EDQUOT, -EIO ... */ |
254 | return VM_FAULT_SIGBUS; |
255 | } |
256 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
257 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
258 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, |
259 | struct page **, void **, get_block_t*); |
260 | int nobh_write_end(struct file *, struct address_space *, |
261 | loff_t, unsigned, unsigned, |
262 | struct page *, void *); |
263 | int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); |
264 | int nobh_writepage(struct page *page, get_block_t *get_block, |
265 | struct writeback_control *wbc); |
266 | |
267 | void buffer_init(void); |
268 | |
269 | /* |
270 | * inline definitions |
271 | */ |
272 | |
273 | static inline void attach_page_buffers(struct page *page, |
274 | struct buffer_head *head) |
275 | { |
276 | get_page(page); |
277 | SetPagePrivate(page); |
278 | set_page_private(page, (unsigned long)head); |
279 | } |
280 | |
281 | static inline void get_bh(struct buffer_head *bh) |
282 | { |
283 | atomic_inc(&bh->b_count); |
284 | } |
285 | |
286 | static inline void put_bh(struct buffer_head *bh) |
287 | { |
288 | smp_mb__before_atomic(); |
289 | atomic_dec(&bh->b_count); |
290 | } |
291 | |
292 | static inline void brelse(struct buffer_head *bh) |
293 | { |
294 | if (bh) |
295 | __brelse(bh); |
296 | } |
297 | |
298 | static inline void bforget(struct buffer_head *bh) |
299 | { |
300 | if (bh) |
301 | __bforget(bh); |
302 | } |
303 | |
304 | static inline struct buffer_head * |
305 | sb_bread(struct super_block *sb, sector_t block) |
306 | { |
307 | return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); |
308 | } |
309 | |
310 | static inline struct buffer_head * |
311 | sb_bread_unmovable(struct super_block *sb, sector_t block) |
312 | { |
313 | return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0); |
314 | } |
315 | |
316 | static inline void |
317 | sb_breadahead(struct super_block *sb, sector_t block) |
318 | { |
319 | __breadahead(sb->s_bdev, block, sb->s_blocksize); |
320 | } |
321 | |
322 | static inline struct buffer_head * |
323 | sb_getblk(struct super_block *sb, sector_t block) |
324 | { |
325 | return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); |
326 | } |
327 | |
328 | |
329 | static inline struct buffer_head * |
330 | sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) |
331 | { |
332 | return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); |
333 | } |
334 | |
335 | static inline struct buffer_head * |
336 | sb_find_get_block(struct super_block *sb, sector_t block) |
337 | { |
338 | return __find_get_block(sb->s_bdev, block, sb->s_blocksize); |
339 | } |
340 | |
341 | static inline void |
342 | map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) |
343 | { |
344 | set_buffer_mapped(bh); |
345 | bh->b_bdev = sb->s_bdev; |
346 | bh->b_blocknr = block; |
347 | bh->b_size = sb->s_blocksize; |
348 | } |
349 | |
350 | static inline void wait_on_buffer(struct buffer_head *bh) |
351 | { |
352 | might_sleep(); |
353 | if (buffer_locked(bh)) |
354 | __wait_on_buffer(bh); |
355 | } |
356 | |
357 | static inline int trylock_buffer(struct buffer_head *bh) |
358 | { |
359 | return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state)); |
360 | } |
361 | |
362 | static inline void lock_buffer(struct buffer_head *bh) |
363 | { |
364 | might_sleep(); |
365 | if (!trylock_buffer(bh)) |
366 | __lock_buffer(bh); |
367 | } |
368 | |
369 | static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, |
370 | sector_t block, |
371 | unsigned size) |
372 | { |
373 | return __getblk_gfp(bdev, block, size, 0); |
374 | } |
375 | |
376 | static inline struct buffer_head *__getblk(struct block_device *bdev, |
377 | sector_t block, |
378 | unsigned size) |
379 | { |
380 | return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); |
381 | } |
382 | |
383 | /** |
384 | * __bread() - reads a specified block and returns the bh |
385 | * @bdev: the block_device to read from |
386 | * @block: number of block |
387 | * @size: size (in bytes) to read |
388 | * |
 * Reads a specified block, and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
392 | */ |
393 | static inline struct buffer_head * |
394 | __bread(struct block_device *bdev, sector_t block, unsigned size) |
395 | { |
396 | return __bread_gfp(bdev, block, size, __GFP_MOVABLE); |
397 | } |
398 | |
399 | extern int __set_page_dirty_buffers(struct page *page); |
400 | |
401 | #else /* CONFIG_BLOCK */ |
402 | |
403 | static inline void buffer_init(void) {} |
404 | static inline int try_to_free_buffers(struct page *page) { return 1; } |
405 | static inline int inode_has_buffers(struct inode *inode) { return 0; } |
406 | static inline void invalidate_inode_buffers(struct inode *inode) {} |
407 | static inline int remove_inode_buffers(struct inode *inode) { return 1; } |
408 | static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } |
409 | |
410 | #endif /* CONFIG_BLOCK */ |
411 | #endif /* _LINUX_BUFFER_HEAD_H */ |
412 | |