// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

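/*
 * Direct I/O to a block device must be aligned to the device's logical
 * block size, both in the starting offset and in the length and memory
 * placement of each iovec segment.
 */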
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
		struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

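/*
 * Fast path for small synchronous direct I/O: build a single bio on the
 * stack, using the inline bio_vec array when the request spans no more
 * than DIO_INLINE_BIO_VECS segments, and wait for it inline.
 */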
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY = 1,
	DIO_IS_SYNC = 2,
};

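/*
 * Per-request state shared by all bios of a larger direct I/O.  Async
 * requests complete the iocb from the final bio completion; sync
 * requests park the submitting task in the waiter field instead.
 */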
struct blkdev_dio {
	union {
		struct kiocb *iocb;
		struct task_struct *waiter;
	};
	size_t size;
	atomic_t ref;
	unsigned int flags;
	struct bio bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

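/*
 * Completion handler shared by all bios of a multi-bio direct I/O.  The
 * first error seen is recorded in the parent bio; the completion that
 * drops the last reference finishes the iocb (async) or wakes the
 * submitting task (sync).
 */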
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

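/*
 * Slow path for direct I/O that does not fit in a single bio: split the
 * request into BIO_MAX_VECS-sized bios that all share one refcounted
 * blkdev_dio embedded in the first bio.
 */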
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking I/O, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub-bios will not
			 * fail getting issued with NOWAIT, and as error
			 * results are coalesced across all of them, be safe
			 * and ask for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

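/*
 * Completion handler for the single-bio async fast path: there is only
 * one bio, so the iocb can be completed directly from its completion
 * without any reference counting.
 */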
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

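/*
 * Fast path for async direct I/O that fits in a single bio, with the
 * blkdev_dio embedded in the bio itself.  For IOCB_HIPRI the bio is
 * marked polled and stashed in iocb->private for iocb_bio_iopoll().
 */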
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

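/*
 * Dispatch a direct I/O request to one of the implementations above:
 * the simple sync or async single-bio paths when it fits in one bio,
 * otherwise the multi-bio slow path.
 */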
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

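/*
 * A block device maps linearly onto itself: every in-range offset maps
 * to the identical disk address, with the extent running from the
 * (block-aligned) offset to the end of the device.
 */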
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	if (iomap->offset >= isize)
		return -EIO;
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.write_begin = blkdev_write_begin,
	.write_end = blkdev_write_end,
	.migrate_folio = buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks = blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.read_folio = blkdev_read_folio,
	.readahead = blkdev_readahead,
	.writepages = blkdev_writepages,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
	.migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

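/*
 * Write back and wait on the dirty pagecache for the given range, then
 * flush the device's volatile write cache so completed writes have
 * reached stable storage.
 */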
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

562 | |
563 | /** |
564 | * file_to_blk_mode - get block open flags from file flags |
565 | * @file: file whose open flags should be converted |
566 | * |
567 | * Look at file open flags and generate corresponding block open flags from |
568 | * them. The function works both for file just being open (e.g. during ->open |
569 | * callback) and for file that is already open. This is actually non-trivial |
570 | * (see comment in the function). |
571 | */ |
572 | blk_mode_t file_to_blk_mode(struct file *file) |
573 | { |
574 | blk_mode_t mode = 0; |
575 | |
576 | if (file->f_mode & FMODE_READ) |
577 | mode |= BLK_OPEN_READ; |
578 | if (file->f_mode & FMODE_WRITE) |
579 | mode |= BLK_OPEN_WRITE; |
580 | /* |
581 | * do_dentry_open() clears O_EXCL from f_flags, use file->private_data |
582 | * to determine whether the open was exclusive for already open files. |
583 | */ |
584 | if (file->private_data) |
585 | mode |= BLK_OPEN_EXCL; |
586 | else if (file->f_flags & O_EXCL) |
587 | mode |= BLK_OPEN_EXCL; |
588 | if (file->f_flags & O_NDELAY) |
589 | mode |= BLK_OPEN_NDELAY; |
590 | |
591 | /* |
592 | * If all bits in O_ACCMODE set (aka O_RDWR | O_WRONLY), the floppy |
593 | * driver has historically allowed ioctls as if the file was opened for |
594 | * writing, but does not allow and actual reads or writes. |
595 | */ |
596 | if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY)) |
597 | mode |= BLK_OPEN_WRITE_IOCTL; |
598 | |
599 | return mode; |
600 | } |
601 | |
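/*
 * Open the block device identified by the special inode's device number.
 * When the open is exclusive, the file itself serves as the holder that
 * bdev_permission() and bdev_open() check against.
 */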
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return -ENXIO;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

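/*
 * Issue a direct write: invalidate any pagecache covering the range up
 * front (falling back to buffered I/O on -EBUSY), and invalidate it
 * again after a successful write so buffered readers do not see stale
 * data.
 */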
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

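/*
 * Read from the block device.  The request is clamped to the device
 * size; for O_DIRECT, whatever the direct path could not serve is read
 * through the pagecache instead.
 */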
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

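/*
 * fallocate() on a block device can never change its size, so the
 * supported modes all reduce to zeroing or discarding an aligned range
 * of the device after kicking out the affected pagecache.
 */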
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

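/*
 * A writable shared mapping of a read-only device makes no sense, so
 * fall back to the read-only mmap helper, which rejects shared mappings
 * that could be written.
 */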
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);