1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * fs/f2fs/file.c |
4 | * |
5 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. |
6 | * http://www.samsung.com/ |
7 | */ |
8 | #include <linux/fs.h> |
9 | #include <linux/f2fs_fs.h> |
10 | #include <linux/stat.h> |
11 | #include <linux/buffer_head.h> |
12 | #include <linux/writeback.h> |
13 | #include <linux/blkdev.h> |
14 | #include <linux/falloc.h> |
15 | #include <linux/types.h> |
16 | #include <linux/compat.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/mount.h> |
19 | #include <linux/pagevec.h> |
20 | #include <linux/uio.h> |
21 | #include <linux/uuid.h> |
22 | #include <linux/file.h> |
23 | #include <linux/nls.h> |
24 | #include <linux/sched/signal.h> |
25 | #include <linux/fileattr.h> |
26 | #include <linux/fadvise.h> |
27 | #include <linux/iomap.h> |
28 | |
29 | #include "f2fs.h" |
30 | #include "node.h" |
31 | #include "segment.h" |
32 | #include "xattr.h" |
33 | #include "acl.h" |
34 | #include "gc.h" |
35 | #include "iostat.h" |
36 | #include <trace/events/f2fs.h> |
37 | #include <uapi/linux/f2fs.h> |
38 | |
39 | static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) |
40 | { |
41 | struct inode *inode = file_inode(f: vmf->vma->vm_file); |
42 | vm_fault_t ret; |
43 | |
44 | ret = filemap_fault(vmf); |
45 | if (!ret) |
46 | f2fs_update_iostat(sbi: F2FS_I_SB(inode), inode, |
47 | type: APP_MAPPED_READ_IO, F2FS_BLKSIZE); |
48 | |
49 | trace_f2fs_filemap_fault(inode, index: vmf->pgoff, ret: (unsigned long)ret); |
50 | |
51 | return ret; |
52 | } |
53 | |
54 | static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) |
55 | { |
56 | struct page *page = vmf->page; |
57 | struct inode *inode = file_inode(f: vmf->vma->vm_file); |
58 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
59 | struct dnode_of_data dn; |
60 | bool need_alloc = true; |
61 | int err = 0; |
62 | |
63 | if (unlikely(IS_IMMUTABLE(inode))) |
64 | return VM_FAULT_SIGBUS; |
65 | |
66 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) |
67 | return VM_FAULT_SIGBUS; |
68 | |
69 | if (unlikely(f2fs_cp_error(sbi))) { |
70 | err = -EIO; |
71 | goto err; |
72 | } |
73 | |
74 | if (!f2fs_is_checkpoint_ready(sbi)) { |
75 | err = -ENOSPC; |
76 | goto err; |
77 | } |
78 | |
79 | err = f2fs_convert_inline_inode(inode); |
80 | if (err) |
81 | goto err; |
82 | |
83 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
84 | if (f2fs_compressed_file(inode)) { |
85 | int ret = f2fs_is_compressed_cluster(inode, index: page->index); |
86 | |
87 | if (ret < 0) { |
88 | err = ret; |
89 | goto err; |
90 | } else if (ret) { |
91 | need_alloc = false; |
92 | } |
93 | } |
94 | #endif |
95 | /* should do out of any locked page */ |
96 | if (need_alloc) |
97 | f2fs_balance_fs(sbi, need: true); |
98 | |
99 | sb_start_pagefault(sb: inode->i_sb); |
100 | |
101 | f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); |
102 | |
103 | file_update_time(file: vmf->vma->vm_file); |
104 | filemap_invalidate_lock_shared(mapping: inode->i_mapping); |
105 | lock_page(page); |
106 | if (unlikely(page->mapping != inode->i_mapping || |
107 | page_offset(page) > i_size_read(inode) || |
108 | !PageUptodate(page))) { |
109 | unlock_page(page); |
110 | err = -EFAULT; |
111 | goto out_sem; |
112 | } |
113 | |
114 | if (need_alloc) { |
115 | /* block allocation */ |
116 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
117 | err = f2fs_get_block_locked(dn: &dn, index: page->index); |
118 | } |
119 | |
120 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
121 | if (!need_alloc) { |
122 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
123 | err = f2fs_get_dnode_of_data(dn: &dn, index: page->index, mode: LOOKUP_NODE); |
124 | f2fs_put_dnode(dn: &dn); |
125 | } |
126 | #endif |
127 | if (err) { |
128 | unlock_page(page); |
129 | goto out_sem; |
130 | } |
131 | |
132 | f2fs_wait_on_page_writeback(page, type: DATA, ordered: false, locked: true); |
133 | |
134 | /* wait for GCed page writeback via META_MAPPING */ |
135 | f2fs_wait_on_block_writeback(inode, blkaddr: dn.data_blkaddr); |
136 | |
137 | /* |
138 | * check to see if the page is mapped already (no holes) |
139 | */ |
140 | if (PageMappedToDisk(page)) |
141 | goto out_sem; |
142 | |
143 | /* page is wholly or partially inside EOF */ |
144 | if (((loff_t)(page->index + 1) << PAGE_SHIFT) > |
145 | i_size_read(inode)) { |
146 | loff_t offset; |
147 | |
148 | offset = i_size_read(inode) & ~PAGE_MASK; |
149 | zero_user_segment(page, start: offset, PAGE_SIZE); |
150 | } |
151 | set_page_dirty(page); |
152 | |
153 | f2fs_update_iostat(sbi, inode, type: APP_MAPPED_IO, F2FS_BLKSIZE); |
154 | f2fs_update_time(sbi, type: REQ_TIME); |
155 | |
156 | trace_f2fs_vm_page_mkwrite(page, type: DATA); |
157 | out_sem: |
158 | filemap_invalidate_unlock_shared(mapping: inode->i_mapping); |
159 | |
160 | sb_end_pagefault(sb: inode->i_sb); |
161 | err: |
162 | return vmf_fs_error(err); |
163 | } |
164 | |
165 | static const struct vm_operations_struct f2fs_file_vm_ops = { |
166 | .fault = f2fs_filemap_fault, |
167 | .map_pages = filemap_map_pages, |
168 | .page_mkwrite = f2fs_vm_page_mkwrite, |
169 | }; |
170 | |
171 | static int get_parent_ino(struct inode *inode, nid_t *pino) |
172 | { |
173 | struct dentry *dentry; |
174 | |
175 | /* |
176 | * Make sure to get the non-deleted alias. The alias associated with |
177 | * the open file descriptor being fsync()'ed may be deleted already. |
178 | */ |
179 | dentry = d_find_alias(inode); |
180 | if (!dentry) |
181 | return 0; |
182 | |
183 | *pino = parent_ino(dentry); |
184 | dput(dentry); |
185 | return 1; |
186 | } |
187 | |
188 | static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) |
189 | { |
190 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
191 | enum cp_reason_type cp_reason = CP_NO_NEEDED; |
192 | |
193 | if (!S_ISREG(inode->i_mode)) |
194 | cp_reason = CP_NON_REGULAR; |
195 | else if (f2fs_compressed_file(inode)) |
196 | cp_reason = CP_COMPRESSED; |
197 | else if (inode->i_nlink != 1) |
198 | cp_reason = CP_HARDLINK; |
199 | else if (is_sbi_flag_set(sbi, type: SBI_NEED_CP)) |
200 | cp_reason = CP_SB_NEED_CP; |
201 | else if (file_wrong_pino(inode)) |
202 | cp_reason = CP_WRONG_PINO; |
203 | else if (!f2fs_space_for_roll_forward(sbi)) |
204 | cp_reason = CP_NO_SPC_ROLL; |
205 | else if (!f2fs_is_checkpointed_node(sbi, nid: F2FS_I(inode)->i_pino)) |
206 | cp_reason = CP_NODE_NEED_CP; |
207 | else if (test_opt(sbi, FASTBOOT)) |
208 | cp_reason = CP_FASTBOOT_MODE; |
209 | else if (F2FS_OPTION(sbi).active_logs == 2) |
210 | cp_reason = CP_SPEC_LOG_NUM; |
211 | else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && |
212 | f2fs_need_dentry_mark(sbi, nid: inode->i_ino) && |
213 | f2fs_exist_written_data(sbi, ino: F2FS_I(inode)->i_pino, |
214 | mode: TRANS_DIR_INO)) |
215 | cp_reason = CP_RECOVER_DIR; |
216 | |
217 | return cp_reason; |
218 | } |
219 | |
220 | static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) |
221 | { |
222 | struct page *i = find_get_page(mapping: NODE_MAPPING(sbi), offset: ino); |
223 | bool ret = false; |
224 | /* But we need to avoid that there are some inode updates */ |
225 | if ((i && PageDirty(page: i)) || f2fs_need_inode_block_update(sbi, ino)) |
226 | ret = true; |
227 | f2fs_put_page(page: i, unlock: 0); |
228 | return ret; |
229 | } |
230 | |
231 | static void try_to_fix_pino(struct inode *inode) |
232 | { |
233 | struct f2fs_inode_info *fi = F2FS_I(inode); |
234 | nid_t pino; |
235 | |
236 | f2fs_down_write(sem: &fi->i_sem); |
237 | if (file_wrong_pino(inode) && inode->i_nlink == 1 && |
238 | get_parent_ino(inode, pino: &pino)) { |
239 | f2fs_i_pino_write(inode, pino); |
240 | file_got_pino(inode); |
241 | } |
242 | f2fs_up_write(sem: &fi->i_sem); |
243 | } |
244 | |
245 | static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, |
246 | int datasync, bool atomic) |
247 | { |
248 | struct inode *inode = file->f_mapping->host; |
249 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
250 | nid_t ino = inode->i_ino; |
251 | int ret = 0; |
252 | enum cp_reason_type cp_reason = 0; |
253 | struct writeback_control wbc = { |
254 | .sync_mode = WB_SYNC_ALL, |
255 | .nr_to_write = LONG_MAX, |
256 | .for_reclaim = 0, |
257 | }; |
258 | unsigned int seq_id = 0; |
259 | |
260 | if (unlikely(f2fs_readonly(inode->i_sb))) |
261 | return 0; |
262 | |
263 | trace_f2fs_sync_file_enter(inode); |
264 | |
265 | if (S_ISDIR(inode->i_mode)) |
266 | goto go_write; |
267 | |
268 | /* if fdatasync is triggered, let's do in-place-update */ |
269 | if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) |
270 | set_inode_flag(inode, flag: FI_NEED_IPU); |
271 | ret = file_write_and_wait_range(file, start, end); |
272 | clear_inode_flag(inode, flag: FI_NEED_IPU); |
273 | |
274 | if (ret || is_sbi_flag_set(sbi, type: SBI_CP_DISABLED)) { |
275 | trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); |
276 | return ret; |
277 | } |
278 | |
279 | /* if the inode is dirty, let's recover all the time */ |
280 | if (!f2fs_skip_inode_update(inode, dsync: datasync)) { |
281 | f2fs_write_inode(inode, NULL); |
282 | goto go_write; |
283 | } |
284 | |
285 | /* |
286 | * if there is no written data, don't waste time to write recovery info. |
287 | */ |
288 | if (!is_inode_flag_set(inode, flag: FI_APPEND_WRITE) && |
289 | !f2fs_exist_written_data(sbi, ino, mode: APPEND_INO)) { |
290 | |
291 | /* it may call write_inode just prior to fsync */ |
292 | if (need_inode_page_update(sbi, ino)) |
293 | goto go_write; |
294 | |
295 | if (is_inode_flag_set(inode, flag: FI_UPDATE_WRITE) || |
296 | f2fs_exist_written_data(sbi, ino, mode: UPDATE_INO)) |
297 | goto flush_out; |
298 | goto out; |
299 | } else { |
300 | /* |
301 | * for OPU case, during fsync(), node can be persisted before |
302 | * data when lower device doesn't support write barrier, result |
303 | * in data corruption after SPO. |
304 | * So for strict fsync mode, force to use atomic write semantics |
305 | * to keep write order in between data/node and last node to |
306 | * avoid potential data corruption. |
307 | */ |
308 | if (F2FS_OPTION(sbi).fsync_mode == |
309 | FSYNC_MODE_STRICT && !atomic) |
310 | atomic = true; |
311 | } |
312 | go_write: |
313 | /* |
314 | * Both of fdatasync() and fsync() are able to be recovered from |
315 | * sudden-power-off. |
316 | */ |
317 | f2fs_down_read(sem: &F2FS_I(inode)->i_sem); |
318 | cp_reason = need_do_checkpoint(inode); |
319 | f2fs_up_read(sem: &F2FS_I(inode)->i_sem); |
320 | |
321 | if (cp_reason) { |
322 | /* all the dirty node pages should be flushed for POR */ |
323 | ret = f2fs_sync_fs(sb: inode->i_sb, sync: 1); |
324 | |
325 | /* |
326 | * We've secured consistency through sync_fs. Following pino |
327 | * will be used only for fsynced inodes after checkpoint. |
328 | */ |
329 | try_to_fix_pino(inode); |
330 | clear_inode_flag(inode, flag: FI_APPEND_WRITE); |
331 | clear_inode_flag(inode, flag: FI_UPDATE_WRITE); |
332 | goto out; |
333 | } |
334 | sync_nodes: |
335 | atomic_inc(v: &sbi->wb_sync_req[NODE]); |
336 | ret = f2fs_fsync_node_pages(sbi, inode, wbc: &wbc, atomic, seq_id: &seq_id); |
337 | atomic_dec(v: &sbi->wb_sync_req[NODE]); |
338 | if (ret) |
339 | goto out; |
340 | |
341 | /* if cp_error was enabled, we should avoid infinite loop */ |
342 | if (unlikely(f2fs_cp_error(sbi))) { |
343 | ret = -EIO; |
344 | goto out; |
345 | } |
346 | |
347 | if (f2fs_need_inode_block_update(sbi, ino)) { |
348 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
349 | f2fs_write_inode(inode, NULL); |
350 | goto sync_nodes; |
351 | } |
352 | |
353 | /* |
354 | * If it's atomic_write, it's just fine to keep write ordering. So |
355 | * here we don't need to wait for node write completion, since we use |
356 | * node chain which serializes node blocks. If one of node writes are |
357 | * reordered, we can see simply broken chain, resulting in stopping |
358 | * roll-forward recovery. It means we'll recover all or none node blocks |
359 | * given fsync mark. |
360 | */ |
361 | if (!atomic) { |
362 | ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id); |
363 | if (ret) |
364 | goto out; |
365 | } |
366 | |
367 | /* once recovery info is written, don't need to tack this */ |
368 | f2fs_remove_ino_entry(sbi, ino, type: APPEND_INO); |
369 | clear_inode_flag(inode, flag: FI_APPEND_WRITE); |
370 | flush_out: |
371 | if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) || |
372 | (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi))) |
373 | ret = f2fs_issue_flush(sbi, ino: inode->i_ino); |
374 | if (!ret) { |
375 | f2fs_remove_ino_entry(sbi, ino, type: UPDATE_INO); |
376 | clear_inode_flag(inode, flag: FI_UPDATE_WRITE); |
377 | f2fs_remove_ino_entry(sbi, ino, type: FLUSH_INO); |
378 | } |
379 | f2fs_update_time(sbi, type: REQ_TIME); |
380 | out: |
381 | trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); |
382 | return ret; |
383 | } |
384 | |
385 | int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
386 | { |
387 | if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) |
388 | return -EIO; |
389 | return f2fs_do_sync_file(file, start, end, datasync, atomic: false); |
390 | } |
391 | |
392 | static bool __found_offset(struct address_space *mapping, block_t blkaddr, |
393 | pgoff_t index, int whence) |
394 | { |
395 | switch (whence) { |
396 | case SEEK_DATA: |
397 | if (__is_valid_data_blkaddr(blkaddr)) |
398 | return true; |
399 | if (blkaddr == NEW_ADDR && |
400 | xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY)) |
401 | return true; |
402 | break; |
403 | case SEEK_HOLE: |
404 | if (blkaddr == NULL_ADDR) |
405 | return true; |
406 | break; |
407 | } |
408 | return false; |
409 | } |
410 | |
411 | static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) |
412 | { |
413 | struct inode *inode = file->f_mapping->host; |
414 | loff_t maxbytes = inode->i_sb->s_maxbytes; |
415 | struct dnode_of_data dn; |
416 | pgoff_t pgofs, end_offset; |
417 | loff_t data_ofs = offset; |
418 | loff_t isize; |
419 | int err = 0; |
420 | |
421 | inode_lock(inode); |
422 | |
423 | isize = i_size_read(inode); |
424 | if (offset >= isize) |
425 | goto fail; |
426 | |
427 | /* handle inline data case */ |
428 | if (f2fs_has_inline_data(inode)) { |
429 | if (whence == SEEK_HOLE) { |
430 | data_ofs = isize; |
431 | goto found; |
432 | } else if (whence == SEEK_DATA) { |
433 | data_ofs = offset; |
434 | goto found; |
435 | } |
436 | } |
437 | |
438 | pgofs = (pgoff_t)(offset >> PAGE_SHIFT); |
439 | |
440 | for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { |
441 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
442 | err = f2fs_get_dnode_of_data(dn: &dn, index: pgofs, mode: LOOKUP_NODE); |
443 | if (err && err != -ENOENT) { |
444 | goto fail; |
445 | } else if (err == -ENOENT) { |
446 | /* direct node does not exists */ |
447 | if (whence == SEEK_DATA) { |
448 | pgofs = f2fs_get_next_page_offset(dn: &dn, pgofs); |
449 | continue; |
450 | } else { |
451 | goto found; |
452 | } |
453 | } |
454 | |
455 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
456 | |
457 | /* find data/hole in dnode block */ |
458 | for (; dn.ofs_in_node < end_offset; |
459 | dn.ofs_in_node++, pgofs++, |
460 | data_ofs = (loff_t)pgofs << PAGE_SHIFT) { |
461 | block_t blkaddr; |
462 | |
463 | blkaddr = f2fs_data_blkaddr(dn: &dn); |
464 | |
465 | if (__is_valid_data_blkaddr(blkaddr) && |
466 | !f2fs_is_valid_blkaddr(sbi: F2FS_I_SB(inode), |
467 | blkaddr, type: DATA_GENERIC_ENHANCE)) { |
468 | f2fs_put_dnode(dn: &dn); |
469 | goto fail; |
470 | } |
471 | |
472 | if (__found_offset(mapping: file->f_mapping, blkaddr, |
473 | index: pgofs, whence)) { |
474 | f2fs_put_dnode(dn: &dn); |
475 | goto found; |
476 | } |
477 | } |
478 | f2fs_put_dnode(dn: &dn); |
479 | } |
480 | |
481 | if (whence == SEEK_DATA) |
482 | goto fail; |
483 | found: |
484 | if (whence == SEEK_HOLE && data_ofs > isize) |
485 | data_ofs = isize; |
486 | inode_unlock(inode); |
487 | return vfs_setpos(file, offset: data_ofs, maxsize: maxbytes); |
488 | fail: |
489 | inode_unlock(inode); |
490 | return -ENXIO; |
491 | } |
492 | |
493 | static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) |
494 | { |
495 | struct inode *inode = file->f_mapping->host; |
496 | loff_t maxbytes = inode->i_sb->s_maxbytes; |
497 | |
498 | if (f2fs_compressed_file(inode)) |
499 | maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS; |
500 | |
501 | switch (whence) { |
502 | case SEEK_SET: |
503 | case SEEK_CUR: |
504 | case SEEK_END: |
505 | return generic_file_llseek_size(file, offset, whence, |
506 | maxsize: maxbytes, eof: i_size_read(inode)); |
507 | case SEEK_DATA: |
508 | case SEEK_HOLE: |
509 | if (offset < 0) |
510 | return -ENXIO; |
511 | return f2fs_seek_block(file, offset, whence); |
512 | } |
513 | |
514 | return -EINVAL; |
515 | } |
516 | |
517 | static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) |
518 | { |
519 | struct inode *inode = file_inode(f: file); |
520 | |
521 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) |
522 | return -EIO; |
523 | |
524 | if (!f2fs_is_compress_backend_ready(inode)) |
525 | return -EOPNOTSUPP; |
526 | |
527 | file_accessed(file); |
528 | vma->vm_ops = &f2fs_file_vm_ops; |
529 | |
530 | f2fs_down_read(sem: &F2FS_I(inode)->i_sem); |
531 | set_inode_flag(inode, flag: FI_MMAP_FILE); |
532 | f2fs_up_read(sem: &F2FS_I(inode)->i_sem); |
533 | |
534 | return 0; |
535 | } |
536 | |
537 | static int f2fs_file_open(struct inode *inode, struct file *filp) |
538 | { |
539 | int err = fscrypt_file_open(inode, filp); |
540 | |
541 | if (err) |
542 | return err; |
543 | |
544 | if (!f2fs_is_compress_backend_ready(inode)) |
545 | return -EOPNOTSUPP; |
546 | |
547 | err = fsverity_file_open(inode, filp); |
548 | if (err) |
549 | return err; |
550 | |
551 | filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; |
552 | filp->f_mode |= FMODE_CAN_ODIRECT; |
553 | |
554 | return dquot_file_open(inode, file: filp); |
555 | } |
556 | |
557 | void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) |
558 | { |
559 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: dn->inode); |
560 | struct f2fs_node *raw_node; |
561 | int nr_free = 0, ofs = dn->ofs_in_node, len = count; |
562 | __le32 *addr; |
563 | int base = 0; |
564 | bool compressed_cluster = false; |
565 | int cluster_index = 0, valid_blocks = 0; |
566 | int cluster_size = F2FS_I(inode: dn->inode)->i_cluster_size; |
567 | bool released = !atomic_read(v: &F2FS_I(inode: dn->inode)->i_compr_blocks); |
568 | |
569 | if (IS_INODE(page: dn->node_page) && f2fs_has_extra_attr(inode: dn->inode)) |
570 | base = get_extra_isize(inode: dn->inode); |
571 | |
572 | raw_node = F2FS_NODE(page: dn->node_page); |
573 | addr = blkaddr_in_node(node: raw_node) + base + ofs; |
574 | |
575 | /* Assumption: truncation starts with cluster */ |
576 | for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) { |
577 | block_t blkaddr = le32_to_cpu(*addr); |
578 | |
579 | if (f2fs_compressed_file(inode: dn->inode) && |
580 | !(cluster_index & (cluster_size - 1))) { |
581 | if (compressed_cluster) |
582 | f2fs_i_compr_blocks_update(inode: dn->inode, |
583 | blocks: valid_blocks, add: false); |
584 | compressed_cluster = (blkaddr == COMPRESS_ADDR); |
585 | valid_blocks = 0; |
586 | } |
587 | |
588 | if (blkaddr == NULL_ADDR) |
589 | continue; |
590 | |
591 | dn->data_blkaddr = NULL_ADDR; |
592 | f2fs_set_data_blkaddr(dn); |
593 | |
594 | if (__is_valid_data_blkaddr(blkaddr)) { |
595 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr, |
596 | type: DATA_GENERIC_ENHANCE)) |
597 | continue; |
598 | if (compressed_cluster) |
599 | valid_blocks++; |
600 | } |
601 | |
602 | if (dn->ofs_in_node == 0 && IS_INODE(page: dn->node_page)) |
603 | clear_inode_flag(inode: dn->inode, flag: FI_FIRST_BLOCK_WRITTEN); |
604 | |
605 | f2fs_invalidate_blocks(sbi, addr: blkaddr); |
606 | |
607 | if (!released || blkaddr != COMPRESS_ADDR) |
608 | nr_free++; |
609 | } |
610 | |
611 | if (compressed_cluster) |
612 | f2fs_i_compr_blocks_update(inode: dn->inode, blocks: valid_blocks, add: false); |
613 | |
614 | if (nr_free) { |
615 | pgoff_t fofs; |
616 | /* |
617 | * once we invalidate valid blkaddr in range [ofs, ofs + count], |
618 | * we will invalidate all blkaddr in the whole range. |
619 | */ |
620 | fofs = f2fs_start_bidx_of_node(node_ofs: ofs_of_node(node_page: dn->node_page), |
621 | inode: dn->inode) + ofs; |
622 | f2fs_update_read_extent_cache_range(dn, fofs, blkaddr: 0, len); |
623 | f2fs_update_age_extent_cache_range(dn, fofs, len); |
624 | dec_valid_block_count(sbi, inode: dn->inode, count: nr_free); |
625 | } |
626 | dn->ofs_in_node = ofs; |
627 | |
628 | f2fs_update_time(sbi, type: REQ_TIME); |
629 | trace_f2fs_truncate_data_blocks_range(inode: dn->inode, nid: dn->nid, |
630 | ofs: dn->ofs_in_node, free: nr_free); |
631 | } |
632 | |
633 | static int truncate_partial_data_page(struct inode *inode, u64 from, |
634 | bool cache_only) |
635 | { |
636 | loff_t offset = from & (PAGE_SIZE - 1); |
637 | pgoff_t index = from >> PAGE_SHIFT; |
638 | struct address_space *mapping = inode->i_mapping; |
639 | struct page *page; |
640 | |
641 | if (!offset && !cache_only) |
642 | return 0; |
643 | |
644 | if (cache_only) { |
645 | page = find_lock_page(mapping, index); |
646 | if (page && PageUptodate(page)) |
647 | goto truncate_out; |
648 | f2fs_put_page(page, unlock: 1); |
649 | return 0; |
650 | } |
651 | |
652 | page = f2fs_get_lock_data_page(inode, index, for_write: true); |
653 | if (IS_ERR(ptr: page)) |
654 | return PTR_ERR(ptr: page) == -ENOENT ? 0 : PTR_ERR(ptr: page); |
655 | truncate_out: |
656 | f2fs_wait_on_page_writeback(page, type: DATA, ordered: true, locked: true); |
657 | zero_user(page, start: offset, PAGE_SIZE - offset); |
658 | |
659 | /* An encrypted inode should have a key and truncate the last page. */ |
660 | f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode)); |
661 | if (!cache_only) |
662 | set_page_dirty(page); |
663 | f2fs_put_page(page, unlock: 1); |
664 | return 0; |
665 | } |
666 | |
667 | int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) |
668 | { |
669 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
670 | struct dnode_of_data dn; |
671 | pgoff_t free_from; |
672 | int count = 0, err = 0; |
673 | struct page *ipage; |
674 | bool truncate_page = false; |
675 | |
676 | trace_f2fs_truncate_blocks_enter(inode, from); |
677 | |
678 | free_from = (pgoff_t)F2FS_BLK_ALIGN(from); |
679 | |
680 | if (free_from >= max_file_blocks(inode)) |
681 | goto free_partial; |
682 | |
683 | if (lock) |
684 | f2fs_lock_op(sbi); |
685 | |
686 | ipage = f2fs_get_node_page(sbi, nid: inode->i_ino); |
687 | if (IS_ERR(ptr: ipage)) { |
688 | err = PTR_ERR(ptr: ipage); |
689 | goto out; |
690 | } |
691 | |
692 | if (f2fs_has_inline_data(inode)) { |
693 | f2fs_truncate_inline_inode(inode, ipage, from); |
694 | f2fs_put_page(page: ipage, unlock: 1); |
695 | truncate_page = true; |
696 | goto out; |
697 | } |
698 | |
699 | set_new_dnode(dn: &dn, inode, ipage, NULL, nid: 0); |
700 | err = f2fs_get_dnode_of_data(dn: &dn, index: free_from, mode: LOOKUP_NODE_RA); |
701 | if (err) { |
702 | if (err == -ENOENT) |
703 | goto free_next; |
704 | goto out; |
705 | } |
706 | |
707 | count = ADDRS_PER_PAGE(dn.node_page, inode); |
708 | |
709 | count -= dn.ofs_in_node; |
710 | f2fs_bug_on(sbi, count < 0); |
711 | |
712 | if (dn.ofs_in_node || IS_INODE(page: dn.node_page)) { |
713 | f2fs_truncate_data_blocks_range(dn: &dn, count); |
714 | free_from += count; |
715 | } |
716 | |
717 | f2fs_put_dnode(dn: &dn); |
718 | free_next: |
719 | err = f2fs_truncate_inode_blocks(inode, from: free_from); |
720 | out: |
721 | if (lock) |
722 | f2fs_unlock_op(sbi); |
723 | free_partial: |
724 | /* lastly zero out the first data page */ |
725 | if (!err) |
726 | err = truncate_partial_data_page(inode, from, cache_only: truncate_page); |
727 | |
728 | trace_f2fs_truncate_blocks_exit(inode, ret: err); |
729 | return err; |
730 | } |
731 | |
732 | int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) |
733 | { |
734 | u64 free_from = from; |
735 | int err; |
736 | |
737 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
738 | /* |
739 | * for compressed file, only support cluster size |
740 | * aligned truncation. |
741 | */ |
742 | if (f2fs_compressed_file(inode)) |
743 | free_from = round_up(from, |
744 | F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); |
745 | #endif |
746 | |
747 | err = f2fs_do_truncate_blocks(inode, from: free_from, lock); |
748 | if (err) |
749 | return err; |
750 | |
751 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
752 | /* |
753 | * For compressed file, after release compress blocks, don't allow write |
754 | * direct, but we should allow write direct after truncate to zero. |
755 | */ |
756 | if (f2fs_compressed_file(inode) && !free_from |
757 | && is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) |
758 | clear_inode_flag(inode, flag: FI_COMPRESS_RELEASED); |
759 | |
760 | if (from != free_from) { |
761 | err = f2fs_truncate_partial_cluster(inode, from, lock); |
762 | if (err) |
763 | return err; |
764 | } |
765 | #endif |
766 | |
767 | return 0; |
768 | } |
769 | |
770 | int f2fs_truncate(struct inode *inode) |
771 | { |
772 | int err; |
773 | |
774 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) |
775 | return -EIO; |
776 | |
777 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
778 | S_ISLNK(inode->i_mode))) |
779 | return 0; |
780 | |
781 | trace_f2fs_truncate(inode); |
782 | |
783 | if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) |
784 | return -EIO; |
785 | |
786 | err = f2fs_dquot_initialize(inode); |
787 | if (err) |
788 | return err; |
789 | |
790 | /* we should check inline_data size */ |
791 | if (!f2fs_may_inline_data(inode)) { |
792 | err = f2fs_convert_inline_inode(inode); |
793 | if (err) |
794 | return err; |
795 | } |
796 | |
797 | err = f2fs_truncate_blocks(inode, from: i_size_read(inode), lock: true); |
798 | if (err) |
799 | return err; |
800 | |
801 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
802 | f2fs_mark_inode_dirty_sync(inode, sync: false); |
803 | return 0; |
804 | } |
805 | |
806 | static bool f2fs_force_buffered_io(struct inode *inode, int rw) |
807 | { |
808 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
809 | |
810 | if (!fscrypt_dio_supported(inode)) |
811 | return true; |
812 | if (fsverity_active(inode)) |
813 | return true; |
814 | if (f2fs_compressed_file(inode)) |
815 | return true; |
816 | |
817 | /* disallow direct IO if any of devices has unaligned blksize */ |
818 | if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) |
819 | return true; |
820 | /* |
821 | * for blkzoned device, fallback direct IO to buffered IO, so |
822 | * all IOs can be serialized by log-structured write. |
823 | */ |
824 | if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE)) |
825 | return true; |
826 | if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi)) |
827 | return true; |
828 | if (is_sbi_flag_set(sbi, type: SBI_CP_DISABLED)) |
829 | return true; |
830 | |
831 | return false; |
832 | } |
833 | |
834 | int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, |
835 | struct kstat *stat, u32 request_mask, unsigned int query_flags) |
836 | { |
837 | struct inode *inode = d_inode(dentry: path->dentry); |
838 | struct f2fs_inode_info *fi = F2FS_I(inode); |
839 | struct f2fs_inode *ri = NULL; |
840 | unsigned int flags; |
841 | |
842 | if (f2fs_has_extra_attr(inode) && |
843 | f2fs_sb_has_inode_crtime(sbi: F2FS_I_SB(inode)) && |
844 | F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) { |
845 | stat->result_mask |= STATX_BTIME; |
846 | stat->btime.tv_sec = fi->i_crtime.tv_sec; |
847 | stat->btime.tv_nsec = fi->i_crtime.tv_nsec; |
848 | } |
849 | |
850 | /* |
851 | * Return the DIO alignment restrictions if requested. We only return |
852 | * this information when requested, since on encrypted files it might |
853 | * take a fair bit of work to get if the file wasn't opened recently. |
854 | * |
855 | * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN |
856 | * cannot represent that, so in that case we report no DIO support. |
857 | */ |
858 | if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { |
859 | unsigned int bsize = i_blocksize(node: inode); |
860 | |
861 | stat->result_mask |= STATX_DIOALIGN; |
862 | if (!f2fs_force_buffered_io(inode, WRITE)) { |
863 | stat->dio_mem_align = bsize; |
864 | stat->dio_offset_align = bsize; |
865 | } |
866 | } |
867 | |
868 | flags = fi->i_flags; |
869 | if (flags & F2FS_COMPR_FL) |
870 | stat->attributes |= STATX_ATTR_COMPRESSED; |
871 | if (flags & F2FS_APPEND_FL) |
872 | stat->attributes |= STATX_ATTR_APPEND; |
873 | if (IS_ENCRYPTED(inode)) |
874 | stat->attributes |= STATX_ATTR_ENCRYPTED; |
875 | if (flags & F2FS_IMMUTABLE_FL) |
876 | stat->attributes |= STATX_ATTR_IMMUTABLE; |
877 | if (flags & F2FS_NODUMP_FL) |
878 | stat->attributes |= STATX_ATTR_NODUMP; |
879 | if (IS_VERITY(inode)) |
880 | stat->attributes |= STATX_ATTR_VERITY; |
881 | |
882 | stat->attributes_mask |= (STATX_ATTR_COMPRESSED | |
883 | STATX_ATTR_APPEND | |
884 | STATX_ATTR_ENCRYPTED | |
885 | STATX_ATTR_IMMUTABLE | |
886 | STATX_ATTR_NODUMP | |
887 | STATX_ATTR_VERITY); |
888 | |
889 | generic_fillattr(idmap, request_mask, inode, stat); |
890 | |
891 | /* we need to show initial sectors used for inline_data/dentries */ |
892 | if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || |
893 | f2fs_has_inline_dentry(inode)) |
894 | stat->blocks += (stat->size + 511) >> 9; |
895 | |
896 | return 0; |
897 | } |
898 | |
899 | #ifdef CONFIG_F2FS_FS_POSIX_ACL |
900 | static void __setattr_copy(struct mnt_idmap *idmap, |
901 | struct inode *inode, const struct iattr *attr) |
902 | { |
903 | unsigned int ia_valid = attr->ia_valid; |
904 | |
905 | i_uid_update(idmap, attr, inode); |
906 | i_gid_update(idmap, attr, inode); |
907 | if (ia_valid & ATTR_ATIME) |
908 | inode_set_atime_to_ts(inode, ts: attr->ia_atime); |
909 | if (ia_valid & ATTR_MTIME) |
910 | inode_set_mtime_to_ts(inode, ts: attr->ia_mtime); |
911 | if (ia_valid & ATTR_CTIME) |
912 | inode_set_ctime_to_ts(inode, ts: attr->ia_ctime); |
913 | if (ia_valid & ATTR_MODE) { |
914 | umode_t mode = attr->ia_mode; |
915 | vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode); |
916 | |
917 | if (!vfsgid_in_group_p(vfsgid) && |
918 | !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID)) |
919 | mode &= ~S_ISGID; |
920 | set_acl_inode(inode, mode); |
921 | } |
922 | } |
923 | #else |
924 | #define __setattr_copy setattr_copy |
925 | #endif |
926 | |
927 | int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, |
928 | struct iattr *attr) |
929 | { |
930 | struct inode *inode = d_inode(dentry); |
931 | int err; |
932 | |
933 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) |
934 | return -EIO; |
935 | |
936 | if (unlikely(IS_IMMUTABLE(inode))) |
937 | return -EPERM; |
938 | |
939 | if (unlikely(IS_APPEND(inode) && |
940 | (attr->ia_valid & (ATTR_MODE | ATTR_UID | |
941 | ATTR_GID | ATTR_TIMES_SET)))) |
942 | return -EPERM; |
943 | |
944 | if ((attr->ia_valid & ATTR_SIZE) && |
945 | !f2fs_is_compress_backend_ready(inode)) |
946 | return -EOPNOTSUPP; |
947 | |
948 | err = setattr_prepare(idmap, dentry, attr); |
949 | if (err) |
950 | return err; |
951 | |
952 | err = fscrypt_prepare_setattr(dentry, attr); |
953 | if (err) |
954 | return err; |
955 | |
956 | err = fsverity_prepare_setattr(dentry, attr); |
957 | if (err) |
958 | return err; |
959 | |
960 | if (is_quota_modification(idmap, inode, ia: attr)) { |
961 | err = f2fs_dquot_initialize(inode); |
962 | if (err) |
963 | return err; |
964 | } |
965 | if (i_uid_needs_update(idmap, attr, inode) || |
966 | i_gid_needs_update(idmap, attr, inode)) { |
967 | f2fs_lock_op(sbi: F2FS_I_SB(inode)); |
968 | err = dquot_transfer(idmap, inode, iattr: attr); |
969 | if (err) { |
970 | set_sbi_flag(sbi: F2FS_I_SB(inode), |
971 | type: SBI_QUOTA_NEED_REPAIR); |
972 | f2fs_unlock_op(sbi: F2FS_I_SB(inode)); |
973 | return err; |
974 | } |
975 | /* |
976 | * update uid/gid under lock_op(), so that dquot and inode can |
977 | * be updated atomically. |
978 | */ |
979 | i_uid_update(idmap, attr, inode); |
980 | i_gid_update(idmap, attr, inode); |
981 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
982 | f2fs_unlock_op(sbi: F2FS_I_SB(inode)); |
983 | } |
984 | |
985 | if (attr->ia_valid & ATTR_SIZE) { |
986 | loff_t old_size = i_size_read(inode); |
987 | |
988 | if (attr->ia_size > MAX_INLINE_DATA(inode)) { |
989 | /* |
990 | * should convert inline inode before i_size_write to |
991 | * keep smaller than inline_data size with inline flag. |
992 | */ |
993 | err = f2fs_convert_inline_inode(inode); |
994 | if (err) |
995 | return err; |
996 | } |
997 | |
998 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
999 | filemap_invalidate_lock(mapping: inode->i_mapping); |
1000 | |
1001 | truncate_setsize(inode, newsize: attr->ia_size); |
1002 | |
1003 | if (attr->ia_size <= old_size) |
1004 | err = f2fs_truncate(inode); |
1005 | /* |
1006 | * do not trim all blocks after i_size if target size is |
1007 | * larger than i_size. |
1008 | */ |
1009 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
1010 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1011 | if (err) |
1012 | return err; |
1013 | |
1014 | spin_lock(lock: &F2FS_I(inode)->i_size_lock); |
1015 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
1016 | F2FS_I(inode)->last_disk_size = i_size_read(inode); |
1017 | spin_unlock(lock: &F2FS_I(inode)->i_size_lock); |
1018 | } |
1019 | |
1020 | __setattr_copy(idmap, inode, attr); |
1021 | |
1022 | if (attr->ia_valid & ATTR_MODE) { |
1023 | err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode)); |
1024 | |
1025 | if (is_inode_flag_set(inode, flag: FI_ACL_MODE)) { |
1026 | if (!err) |
1027 | inode->i_mode = F2FS_I(inode)->i_acl_mode; |
1028 | clear_inode_flag(inode, flag: FI_ACL_MODE); |
1029 | } |
1030 | } |
1031 | |
1032 | /* file size may changed here */ |
1033 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
1034 | |
1035 | /* inode change will produce dirty node pages flushed by checkpoint */ |
1036 | f2fs_balance_fs(sbi: F2FS_I_SB(inode), need: true); |
1037 | |
1038 | return err; |
1039 | } |
1040 | |
1041 | const struct inode_operations f2fs_file_inode_operations = { |
1042 | .getattr = f2fs_getattr, |
1043 | .setattr = f2fs_setattr, |
1044 | .get_inode_acl = f2fs_get_acl, |
1045 | .set_acl = f2fs_set_acl, |
1046 | .listxattr = f2fs_listxattr, |
1047 | .fiemap = f2fs_fiemap, |
1048 | .fileattr_get = f2fs_fileattr_get, |
1049 | .fileattr_set = f2fs_fileattr_set, |
1050 | }; |
1051 | |
1052 | static int fill_zero(struct inode *inode, pgoff_t index, |
1053 | loff_t start, loff_t len) |
1054 | { |
1055 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1056 | struct page *page; |
1057 | |
1058 | if (!len) |
1059 | return 0; |
1060 | |
1061 | f2fs_balance_fs(sbi, need: true); |
1062 | |
1063 | f2fs_lock_op(sbi); |
1064 | page = f2fs_get_new_data_page(inode, NULL, index, new_i_size: false); |
1065 | f2fs_unlock_op(sbi); |
1066 | |
1067 | if (IS_ERR(ptr: page)) |
1068 | return PTR_ERR(ptr: page); |
1069 | |
1070 | f2fs_wait_on_page_writeback(page, type: DATA, ordered: true, locked: true); |
1071 | zero_user(page, start, size: len); |
1072 | set_page_dirty(page); |
1073 | f2fs_put_page(page, unlock: 1); |
1074 | return 0; |
1075 | } |
1076 | |
1077 | int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) |
1078 | { |
1079 | int err; |
1080 | |
1081 | while (pg_start < pg_end) { |
1082 | struct dnode_of_data dn; |
1083 | pgoff_t end_offset, count; |
1084 | |
1085 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
1086 | err = f2fs_get_dnode_of_data(dn: &dn, index: pg_start, mode: LOOKUP_NODE); |
1087 | if (err) { |
1088 | if (err == -ENOENT) { |
1089 | pg_start = f2fs_get_next_page_offset(dn: &dn, |
1090 | pgofs: pg_start); |
1091 | continue; |
1092 | } |
1093 | return err; |
1094 | } |
1095 | |
1096 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
1097 | count = min(end_offset - dn.ofs_in_node, pg_end - pg_start); |
1098 | |
1099 | f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset); |
1100 | |
1101 | f2fs_truncate_data_blocks_range(dn: &dn, count); |
1102 | f2fs_put_dnode(dn: &dn); |
1103 | |
1104 | pg_start += count; |
1105 | } |
1106 | return 0; |
1107 | } |
1108 | |
1109 | static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len) |
1110 | { |
1111 | pgoff_t pg_start, pg_end; |
1112 | loff_t off_start, off_end; |
1113 | int ret; |
1114 | |
1115 | ret = f2fs_convert_inline_inode(inode); |
1116 | if (ret) |
1117 | return ret; |
1118 | |
1119 | pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; |
1120 | pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; |
1121 | |
1122 | off_start = offset & (PAGE_SIZE - 1); |
1123 | off_end = (offset + len) & (PAGE_SIZE - 1); |
1124 | |
1125 | if (pg_start == pg_end) { |
1126 | ret = fill_zero(inode, index: pg_start, start: off_start, |
1127 | len: off_end - off_start); |
1128 | if (ret) |
1129 | return ret; |
1130 | } else { |
1131 | if (off_start) { |
1132 | ret = fill_zero(inode, index: pg_start++, start: off_start, |
1133 | PAGE_SIZE - off_start); |
1134 | if (ret) |
1135 | return ret; |
1136 | } |
1137 | if (off_end) { |
1138 | ret = fill_zero(inode, index: pg_end, start: 0, len: off_end); |
1139 | if (ret) |
1140 | return ret; |
1141 | } |
1142 | |
1143 | if (pg_start < pg_end) { |
1144 | loff_t blk_start, blk_end; |
1145 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1146 | |
1147 | f2fs_balance_fs(sbi, need: true); |
1148 | |
1149 | blk_start = (loff_t)pg_start << PAGE_SHIFT; |
1150 | blk_end = (loff_t)pg_end << PAGE_SHIFT; |
1151 | |
1152 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1153 | filemap_invalidate_lock(mapping: inode->i_mapping); |
1154 | |
1155 | truncate_pagecache_range(inode, offset: blk_start, end: blk_end - 1); |
1156 | |
1157 | f2fs_lock_op(sbi); |
1158 | ret = f2fs_truncate_hole(inode, pg_start, pg_end); |
1159 | f2fs_unlock_op(sbi); |
1160 | |
1161 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
1162 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1163 | } |
1164 | } |
1165 | |
1166 | return ret; |
1167 | } |
1168 | |
1169 | static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr, |
1170 | int *do_replace, pgoff_t off, pgoff_t len) |
1171 | { |
1172 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1173 | struct dnode_of_data dn; |
1174 | int ret, done, i; |
1175 | |
1176 | next_dnode: |
1177 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
1178 | ret = f2fs_get_dnode_of_data(dn: &dn, index: off, mode: LOOKUP_NODE_RA); |
1179 | if (ret && ret != -ENOENT) { |
1180 | return ret; |
1181 | } else if (ret == -ENOENT) { |
1182 | if (dn.max_level == 0) |
1183 | return -ENOENT; |
1184 | done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - |
1185 | dn.ofs_in_node, len); |
1186 | blkaddr += done; |
1187 | do_replace += done; |
1188 | goto next; |
1189 | } |
1190 | |
1191 | done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - |
1192 | dn.ofs_in_node, len); |
1193 | for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { |
1194 | *blkaddr = f2fs_data_blkaddr(dn: &dn); |
1195 | |
1196 | if (__is_valid_data_blkaddr(blkaddr: *blkaddr) && |
1197 | !f2fs_is_valid_blkaddr(sbi, blkaddr: *blkaddr, |
1198 | type: DATA_GENERIC_ENHANCE)) { |
1199 | f2fs_put_dnode(dn: &dn); |
1200 | f2fs_handle_error(sbi, error: ERROR_INVALID_BLKADDR); |
1201 | return -EFSCORRUPTED; |
1202 | } |
1203 | |
1204 | if (!f2fs_is_checkpointed_data(sbi, blkaddr: *blkaddr)) { |
1205 | |
1206 | if (f2fs_lfs_mode(sbi)) { |
1207 | f2fs_put_dnode(dn: &dn); |
1208 | return -EOPNOTSUPP; |
1209 | } |
1210 | |
1211 | /* do not invalidate this block address */ |
1212 | f2fs_update_data_blkaddr(dn: &dn, NULL_ADDR); |
1213 | *do_replace = 1; |
1214 | } |
1215 | } |
1216 | f2fs_put_dnode(dn: &dn); |
1217 | next: |
1218 | len -= done; |
1219 | off += done; |
1220 | if (len) |
1221 | goto next_dnode; |
1222 | return 0; |
1223 | } |
1224 | |
1225 | static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, |
1226 | int *do_replace, pgoff_t off, int len) |
1227 | { |
1228 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1229 | struct dnode_of_data dn; |
1230 | int ret, i; |
1231 | |
1232 | for (i = 0; i < len; i++, do_replace++, blkaddr++) { |
1233 | if (*do_replace == 0) |
1234 | continue; |
1235 | |
1236 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
1237 | ret = f2fs_get_dnode_of_data(dn: &dn, index: off + i, mode: LOOKUP_NODE_RA); |
1238 | if (ret) { |
1239 | dec_valid_block_count(sbi, inode, count: 1); |
1240 | f2fs_invalidate_blocks(sbi, addr: *blkaddr); |
1241 | } else { |
1242 | f2fs_update_data_blkaddr(dn: &dn, blkaddr: *blkaddr); |
1243 | } |
1244 | f2fs_put_dnode(dn: &dn); |
1245 | } |
1246 | return 0; |
1247 | } |
1248 | |
1249 | static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, |
1250 | block_t *blkaddr, int *do_replace, |
1251 | pgoff_t src, pgoff_t dst, pgoff_t len, bool full) |
1252 | { |
1253 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: src_inode); |
1254 | pgoff_t i = 0; |
1255 | int ret; |
1256 | |
1257 | while (i < len) { |
1258 | if (blkaddr[i] == NULL_ADDR && !full) { |
1259 | i++; |
1260 | continue; |
1261 | } |
1262 | |
1263 | if (do_replace[i] || blkaddr[i] == NULL_ADDR) { |
1264 | struct dnode_of_data dn; |
1265 | struct node_info ni; |
1266 | size_t new_size; |
1267 | pgoff_t ilen; |
1268 | |
1269 | set_new_dnode(dn: &dn, inode: dst_inode, NULL, NULL, nid: 0); |
1270 | ret = f2fs_get_dnode_of_data(dn: &dn, index: dst + i, mode: ALLOC_NODE); |
1271 | if (ret) |
1272 | return ret; |
1273 | |
1274 | ret = f2fs_get_node_info(sbi, nid: dn.nid, ni: &ni, checkpoint_context: false); |
1275 | if (ret) { |
1276 | f2fs_put_dnode(dn: &dn); |
1277 | return ret; |
1278 | } |
1279 | |
1280 | ilen = min((pgoff_t) |
1281 | ADDRS_PER_PAGE(dn.node_page, dst_inode) - |
1282 | dn.ofs_in_node, len - i); |
1283 | do { |
1284 | dn.data_blkaddr = f2fs_data_blkaddr(dn: &dn); |
1285 | f2fs_truncate_data_blocks_range(dn: &dn, count: 1); |
1286 | |
1287 | if (do_replace[i]) { |
1288 | f2fs_i_blocks_write(inode: src_inode, |
1289 | diff: 1, add: false, claim: false); |
1290 | f2fs_i_blocks_write(inode: dst_inode, |
1291 | diff: 1, add: true, claim: false); |
1292 | f2fs_replace_block(sbi, dn: &dn, old_addr: dn.data_blkaddr, |
1293 | new_addr: blkaddr[i], version: ni.version, recover_curseg: true, recover_newaddr: false); |
1294 | |
1295 | do_replace[i] = 0; |
1296 | } |
1297 | dn.ofs_in_node++; |
1298 | i++; |
1299 | new_size = (loff_t)(dst + i) << PAGE_SHIFT; |
1300 | if (dst_inode->i_size < new_size) |
1301 | f2fs_i_size_write(inode: dst_inode, i_size: new_size); |
1302 | } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); |
1303 | |
1304 | f2fs_put_dnode(dn: &dn); |
1305 | } else { |
1306 | struct page *psrc, *pdst; |
1307 | |
1308 | psrc = f2fs_get_lock_data_page(inode: src_inode, |
1309 | index: src + i, for_write: true); |
1310 | if (IS_ERR(ptr: psrc)) |
1311 | return PTR_ERR(ptr: psrc); |
1312 | pdst = f2fs_get_new_data_page(inode: dst_inode, NULL, index: dst + i, |
1313 | new_i_size: true); |
1314 | if (IS_ERR(ptr: pdst)) { |
1315 | f2fs_put_page(page: psrc, unlock: 1); |
1316 | return PTR_ERR(ptr: pdst); |
1317 | } |
1318 | memcpy_page(dst_page: pdst, dst_off: 0, src_page: psrc, src_off: 0, PAGE_SIZE); |
1319 | set_page_dirty(pdst); |
1320 | f2fs_put_page(page: pdst, unlock: 1); |
1321 | f2fs_put_page(page: psrc, unlock: 1); |
1322 | |
1323 | ret = f2fs_truncate_hole(inode: src_inode, |
1324 | pg_start: src + i, pg_end: src + i + 1); |
1325 | if (ret) |
1326 | return ret; |
1327 | i++; |
1328 | } |
1329 | } |
1330 | return 0; |
1331 | } |
1332 | |
1333 | static int __exchange_data_block(struct inode *src_inode, |
1334 | struct inode *dst_inode, pgoff_t src, pgoff_t dst, |
1335 | pgoff_t len, bool full) |
1336 | { |
1337 | block_t *src_blkaddr; |
1338 | int *do_replace; |
1339 | pgoff_t olen; |
1340 | int ret; |
1341 | |
1342 | while (len) { |
1343 | olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len); |
1344 | |
1345 | src_blkaddr = f2fs_kvzalloc(sbi: F2FS_I_SB(inode: src_inode), |
1346 | array_size(olen, sizeof(block_t)), |
1347 | GFP_NOFS); |
1348 | if (!src_blkaddr) |
1349 | return -ENOMEM; |
1350 | |
1351 | do_replace = f2fs_kvzalloc(sbi: F2FS_I_SB(inode: src_inode), |
1352 | array_size(olen, sizeof(int)), |
1353 | GFP_NOFS); |
1354 | if (!do_replace) { |
1355 | kvfree(addr: src_blkaddr); |
1356 | return -ENOMEM; |
1357 | } |
1358 | |
1359 | ret = __read_out_blkaddrs(inode: src_inode, blkaddr: src_blkaddr, |
1360 | do_replace, off: src, len: olen); |
1361 | if (ret) |
1362 | goto roll_back; |
1363 | |
1364 | ret = __clone_blkaddrs(src_inode, dst_inode, blkaddr: src_blkaddr, |
1365 | do_replace, src, dst, len: olen, full); |
1366 | if (ret) |
1367 | goto roll_back; |
1368 | |
1369 | src += olen; |
1370 | dst += olen; |
1371 | len -= olen; |
1372 | |
1373 | kvfree(addr: src_blkaddr); |
1374 | kvfree(addr: do_replace); |
1375 | } |
1376 | return 0; |
1377 | |
1378 | roll_back: |
1379 | __roll_back_blkaddrs(inode: src_inode, blkaddr: src_blkaddr, do_replace, off: src, len: olen); |
1380 | kvfree(addr: src_blkaddr); |
1381 | kvfree(addr: do_replace); |
1382 | return ret; |
1383 | } |
1384 | |
1385 | static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) |
1386 | { |
1387 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1388 | pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
1389 | pgoff_t start = offset >> PAGE_SHIFT; |
1390 | pgoff_t end = (offset + len) >> PAGE_SHIFT; |
1391 | int ret; |
1392 | |
1393 | f2fs_balance_fs(sbi, need: true); |
1394 | |
1395 | /* avoid gc operation during block exchange */ |
1396 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1397 | filemap_invalidate_lock(mapping: inode->i_mapping); |
1398 | |
1399 | f2fs_lock_op(sbi); |
1400 | f2fs_drop_extent_tree(inode); |
1401 | truncate_pagecache(inode, new: offset); |
1402 | ret = __exchange_data_block(src_inode: inode, dst_inode: inode, src: end, dst: start, len: nrpages - end, full: true); |
1403 | f2fs_unlock_op(sbi); |
1404 | |
1405 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
1406 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1407 | return ret; |
1408 | } |
1409 | |
1410 | static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) |
1411 | { |
1412 | loff_t new_size; |
1413 | int ret; |
1414 | |
1415 | if (offset + len >= i_size_read(inode)) |
1416 | return -EINVAL; |
1417 | |
1418 | /* collapse range should be aligned to block size of f2fs. */ |
1419 | if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) |
1420 | return -EINVAL; |
1421 | |
1422 | ret = f2fs_convert_inline_inode(inode); |
1423 | if (ret) |
1424 | return ret; |
1425 | |
1426 | /* write out all dirty pages from offset */ |
1427 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: offset, LLONG_MAX); |
1428 | if (ret) |
1429 | return ret; |
1430 | |
1431 | ret = f2fs_do_collapse(inode, offset, len); |
1432 | if (ret) |
1433 | return ret; |
1434 | |
1435 | /* write out all moved pages, if possible */ |
1436 | filemap_invalidate_lock(mapping: inode->i_mapping); |
1437 | filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: offset, LLONG_MAX); |
1438 | truncate_pagecache(inode, new: offset); |
1439 | |
1440 | new_size = i_size_read(inode) - len; |
1441 | ret = f2fs_truncate_blocks(inode, from: new_size, lock: true); |
1442 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
1443 | if (!ret) |
1444 | f2fs_i_size_write(inode, i_size: new_size); |
1445 | return ret; |
1446 | } |
1447 | |
1448 | static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, |
1449 | pgoff_t end) |
1450 | { |
1451 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: dn->inode); |
1452 | pgoff_t index = start; |
1453 | unsigned int ofs_in_node = dn->ofs_in_node; |
1454 | blkcnt_t count = 0; |
1455 | int ret; |
1456 | |
1457 | for (; index < end; index++, dn->ofs_in_node++) { |
1458 | if (f2fs_data_blkaddr(dn) == NULL_ADDR) |
1459 | count++; |
1460 | } |
1461 | |
1462 | dn->ofs_in_node = ofs_in_node; |
1463 | ret = f2fs_reserve_new_blocks(dn, count); |
1464 | if (ret) |
1465 | return ret; |
1466 | |
1467 | dn->ofs_in_node = ofs_in_node; |
1468 | for (index = start; index < end; index++, dn->ofs_in_node++) { |
1469 | dn->data_blkaddr = f2fs_data_blkaddr(dn); |
1470 | /* |
1471 | * f2fs_reserve_new_blocks will not guarantee entire block |
1472 | * allocation. |
1473 | */ |
1474 | if (dn->data_blkaddr == NULL_ADDR) { |
1475 | ret = -ENOSPC; |
1476 | break; |
1477 | } |
1478 | |
1479 | if (dn->data_blkaddr == NEW_ADDR) |
1480 | continue; |
1481 | |
1482 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr: dn->data_blkaddr, |
1483 | type: DATA_GENERIC_ENHANCE)) { |
1484 | ret = -EFSCORRUPTED; |
1485 | f2fs_handle_error(sbi, error: ERROR_INVALID_BLKADDR); |
1486 | break; |
1487 | } |
1488 | |
1489 | f2fs_invalidate_blocks(sbi, addr: dn->data_blkaddr); |
1490 | dn->data_blkaddr = NEW_ADDR; |
1491 | f2fs_set_data_blkaddr(dn); |
1492 | } |
1493 | |
1494 | f2fs_update_read_extent_cache_range(dn, fofs: start, blkaddr: 0, len: index - start); |
1495 | f2fs_update_age_extent_cache_range(dn, fofs: start, len: index - start); |
1496 | |
1497 | return ret; |
1498 | } |
1499 | |
1500 | static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, |
1501 | int mode) |
1502 | { |
1503 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1504 | struct address_space *mapping = inode->i_mapping; |
1505 | pgoff_t index, pg_start, pg_end; |
1506 | loff_t new_size = i_size_read(inode); |
1507 | loff_t off_start, off_end; |
1508 | int ret = 0; |
1509 | |
1510 | ret = inode_newsize_ok(inode, offset: (len + offset)); |
1511 | if (ret) |
1512 | return ret; |
1513 | |
1514 | ret = f2fs_convert_inline_inode(inode); |
1515 | if (ret) |
1516 | return ret; |
1517 | |
1518 | ret = filemap_write_and_wait_range(mapping, lstart: offset, lend: offset + len - 1); |
1519 | if (ret) |
1520 | return ret; |
1521 | |
1522 | pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; |
1523 | pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; |
1524 | |
1525 | off_start = offset & (PAGE_SIZE - 1); |
1526 | off_end = (offset + len) & (PAGE_SIZE - 1); |
1527 | |
1528 | if (pg_start == pg_end) { |
1529 | ret = fill_zero(inode, index: pg_start, start: off_start, |
1530 | len: off_end - off_start); |
1531 | if (ret) |
1532 | return ret; |
1533 | |
1534 | new_size = max_t(loff_t, new_size, offset + len); |
1535 | } else { |
1536 | if (off_start) { |
1537 | ret = fill_zero(inode, index: pg_start++, start: off_start, |
1538 | PAGE_SIZE - off_start); |
1539 | if (ret) |
1540 | return ret; |
1541 | |
1542 | new_size = max_t(loff_t, new_size, |
1543 | (loff_t)pg_start << PAGE_SHIFT); |
1544 | } |
1545 | |
1546 | for (index = pg_start; index < pg_end;) { |
1547 | struct dnode_of_data dn; |
1548 | unsigned int end_offset; |
1549 | pgoff_t end; |
1550 | |
1551 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1552 | filemap_invalidate_lock(mapping); |
1553 | |
1554 | truncate_pagecache_range(inode, |
1555 | offset: (loff_t)index << PAGE_SHIFT, |
1556 | end: ((loff_t)pg_end << PAGE_SHIFT) - 1); |
1557 | |
1558 | f2fs_lock_op(sbi); |
1559 | |
1560 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
1561 | ret = f2fs_get_dnode_of_data(dn: &dn, index, mode: ALLOC_NODE); |
1562 | if (ret) { |
1563 | f2fs_unlock_op(sbi); |
1564 | filemap_invalidate_unlock(mapping); |
1565 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1566 | goto out; |
1567 | } |
1568 | |
1569 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
1570 | end = min(pg_end, end_offset - dn.ofs_in_node + index); |
1571 | |
1572 | ret = f2fs_do_zero_range(dn: &dn, start: index, end); |
1573 | f2fs_put_dnode(dn: &dn); |
1574 | |
1575 | f2fs_unlock_op(sbi); |
1576 | filemap_invalidate_unlock(mapping); |
1577 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1578 | |
1579 | f2fs_balance_fs(sbi, need: dn.node_changed); |
1580 | |
1581 | if (ret) |
1582 | goto out; |
1583 | |
1584 | index = end; |
1585 | new_size = max_t(loff_t, new_size, |
1586 | (loff_t)index << PAGE_SHIFT); |
1587 | } |
1588 | |
1589 | if (off_end) { |
1590 | ret = fill_zero(inode, index: pg_end, start: 0, len: off_end); |
1591 | if (ret) |
1592 | goto out; |
1593 | |
1594 | new_size = max_t(loff_t, new_size, offset + len); |
1595 | } |
1596 | } |
1597 | |
1598 | out: |
1599 | if (new_size > i_size_read(inode)) { |
1600 | if (mode & FALLOC_FL_KEEP_SIZE) |
1601 | file_set_keep_isize(inode); |
1602 | else |
1603 | f2fs_i_size_write(inode, i_size: new_size); |
1604 | } |
1605 | return ret; |
1606 | } |
1607 | |
1608 | static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) |
1609 | { |
1610 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1611 | struct address_space *mapping = inode->i_mapping; |
1612 | pgoff_t nr, pg_start, pg_end, delta, idx; |
1613 | loff_t new_size; |
1614 | int ret = 0; |
1615 | |
1616 | new_size = i_size_read(inode) + len; |
1617 | ret = inode_newsize_ok(inode, offset: new_size); |
1618 | if (ret) |
1619 | return ret; |
1620 | |
1621 | if (offset >= i_size_read(inode)) |
1622 | return -EINVAL; |
1623 | |
1624 | /* insert range should be aligned to block size of f2fs. */ |
1625 | if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) |
1626 | return -EINVAL; |
1627 | |
1628 | ret = f2fs_convert_inline_inode(inode); |
1629 | if (ret) |
1630 | return ret; |
1631 | |
1632 | f2fs_balance_fs(sbi, need: true); |
1633 | |
1634 | filemap_invalidate_lock(mapping); |
1635 | ret = f2fs_truncate_blocks(inode, from: i_size_read(inode), lock: true); |
1636 | filemap_invalidate_unlock(mapping); |
1637 | if (ret) |
1638 | return ret; |
1639 | |
1640 | /* write out all dirty pages from offset */ |
1641 | ret = filemap_write_and_wait_range(mapping, lstart: offset, LLONG_MAX); |
1642 | if (ret) |
1643 | return ret; |
1644 | |
1645 | pg_start = offset >> PAGE_SHIFT; |
1646 | pg_end = (offset + len) >> PAGE_SHIFT; |
1647 | delta = pg_end - pg_start; |
1648 | idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
1649 | |
1650 | /* avoid gc operation during block exchange */ |
1651 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1652 | filemap_invalidate_lock(mapping); |
1653 | truncate_pagecache(inode, new: offset); |
1654 | |
1655 | while (!ret && idx > pg_start) { |
1656 | nr = idx - pg_start; |
1657 | if (nr > delta) |
1658 | nr = delta; |
1659 | idx -= nr; |
1660 | |
1661 | f2fs_lock_op(sbi); |
1662 | f2fs_drop_extent_tree(inode); |
1663 | |
1664 | ret = __exchange_data_block(src_inode: inode, dst_inode: inode, src: idx, |
1665 | dst: idx + delta, len: nr, full: false); |
1666 | f2fs_unlock_op(sbi); |
1667 | } |
1668 | filemap_invalidate_unlock(mapping); |
1669 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
1670 | |
1671 | /* write out all moved pages, if possible */ |
1672 | filemap_invalidate_lock(mapping); |
1673 | filemap_write_and_wait_range(mapping, lstart: offset, LLONG_MAX); |
1674 | truncate_pagecache(inode, new: offset); |
1675 | filemap_invalidate_unlock(mapping); |
1676 | |
1677 | if (!ret) |
1678 | f2fs_i_size_write(inode, i_size: new_size); |
1679 | return ret; |
1680 | } |
1681 | |
1682 | static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, |
1683 | loff_t len, int mode) |
1684 | { |
1685 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1686 | struct f2fs_map_blocks map = { .m_next_pgofs = NULL, |
1687 | .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, |
1688 | .m_may_create = true }; |
1689 | struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO, |
1690 | .init_gc_type = FG_GC, |
1691 | .should_migrate_blocks = false, |
1692 | .err_gc_skipped = true, |
1693 | .nr_free_secs = 0 }; |
1694 | pgoff_t pg_start, pg_end; |
1695 | loff_t new_size; |
1696 | loff_t off_end; |
1697 | block_t expanded = 0; |
1698 | int err; |
1699 | |
1700 | err = inode_newsize_ok(inode, offset: (len + offset)); |
1701 | if (err) |
1702 | return err; |
1703 | |
1704 | err = f2fs_convert_inline_inode(inode); |
1705 | if (err) |
1706 | return err; |
1707 | |
1708 | f2fs_balance_fs(sbi, need: true); |
1709 | |
1710 | pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; |
1711 | pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; |
1712 | off_end = (offset + len) & (PAGE_SIZE - 1); |
1713 | |
1714 | map.m_lblk = pg_start; |
1715 | map.m_len = pg_end - pg_start; |
1716 | if (off_end) |
1717 | map.m_len++; |
1718 | |
1719 | if (!map.m_len) |
1720 | return 0; |
1721 | |
1722 | if (f2fs_is_pinned_file(inode)) { |
1723 | block_t sec_blks = CAP_BLKS_PER_SEC(sbi); |
1724 | block_t sec_len = roundup(map.m_len, sec_blks); |
1725 | |
1726 | map.m_len = sec_blks; |
1727 | next_alloc: |
1728 | if (has_not_enough_free_secs(sbi, freed: 0, |
1729 | GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { |
1730 | f2fs_down_write(sem: &sbi->gc_lock); |
1731 | stat_inc_gc_call_count(sbi, FOREGROUND); |
1732 | err = f2fs_gc(sbi, gc_control: &gc_control); |
1733 | if (err && err != -ENODATA) |
1734 | goto out_err; |
1735 | } |
1736 | |
1737 | f2fs_down_write(sem: &sbi->pin_sem); |
1738 | |
1739 | f2fs_lock_op(sbi); |
1740 | f2fs_allocate_new_section(sbi, type: CURSEG_COLD_DATA_PINNED, force: false); |
1741 | f2fs_unlock_op(sbi); |
1742 | |
1743 | map.m_seg_type = CURSEG_COLD_DATA_PINNED; |
1744 | err = f2fs_map_blocks(inode, map: &map, flag: F2FS_GET_BLOCK_PRE_DIO); |
1745 | file_dont_truncate(inode); |
1746 | |
1747 | f2fs_up_write(sem: &sbi->pin_sem); |
1748 | |
1749 | expanded += map.m_len; |
1750 | sec_len -= map.m_len; |
1751 | map.m_lblk += map.m_len; |
1752 | if (!err && sec_len) |
1753 | goto next_alloc; |
1754 | |
1755 | map.m_len = expanded; |
1756 | } else { |
1757 | err = f2fs_map_blocks(inode, map: &map, flag: F2FS_GET_BLOCK_PRE_AIO); |
1758 | expanded = map.m_len; |
1759 | } |
1760 | out_err: |
1761 | if (err) { |
1762 | pgoff_t last_off; |
1763 | |
1764 | if (!expanded) |
1765 | return err; |
1766 | |
1767 | last_off = pg_start + expanded - 1; |
1768 | |
1769 | /* update new size to the failed position */ |
1770 | new_size = (last_off == pg_end) ? offset + len : |
1771 | (loff_t)(last_off + 1) << PAGE_SHIFT; |
1772 | } else { |
1773 | new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end; |
1774 | } |
1775 | |
1776 | if (new_size > i_size_read(inode)) { |
1777 | if (mode & FALLOC_FL_KEEP_SIZE) |
1778 | file_set_keep_isize(inode); |
1779 | else |
1780 | f2fs_i_size_write(inode, i_size: new_size); |
1781 | } |
1782 | |
1783 | return err; |
1784 | } |
1785 | |
1786 | static long f2fs_fallocate(struct file *file, int mode, |
1787 | loff_t offset, loff_t len) |
1788 | { |
1789 | struct inode *inode = file_inode(f: file); |
1790 | long ret = 0; |
1791 | |
1792 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) |
1793 | return -EIO; |
1794 | if (!f2fs_is_checkpoint_ready(sbi: F2FS_I_SB(inode))) |
1795 | return -ENOSPC; |
1796 | if (!f2fs_is_compress_backend_ready(inode)) |
1797 | return -EOPNOTSUPP; |
1798 | |
/* f2fs only supports ->fallocate for regular files */
1800 | if (!S_ISREG(inode->i_mode)) |
1801 | return -EINVAL; |
1802 | |
1803 | if (IS_ENCRYPTED(inode) && |
1804 | (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) |
1805 | return -EOPNOTSUPP; |
1806 | |
1807 | /* |
1808 | * Pinned file should not support partial truncation since the block |
1809 | * can be used by applications. |
1810 | */ |
1811 | if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) && |
1812 | (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | |
1813 | FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) |
1814 | return -EOPNOTSUPP; |
1815 | |
1816 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | |
1817 | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | |
1818 | FALLOC_FL_INSERT_RANGE)) |
1819 | return -EOPNOTSUPP; |
1820 | |
1821 | inode_lock(inode); |
1822 | |
1823 | ret = file_modified(file); |
1824 | if (ret) |
1825 | goto out; |
1826 | |
1827 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
1828 | if (offset >= inode->i_size) |
1829 | goto out; |
1830 | |
1831 | ret = f2fs_punch_hole(inode, offset, len); |
1832 | } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { |
1833 | ret = f2fs_collapse_range(inode, offset, len); |
1834 | } else if (mode & FALLOC_FL_ZERO_RANGE) { |
1835 | ret = f2fs_zero_range(inode, offset, len, mode); |
1836 | } else if (mode & FALLOC_FL_INSERT_RANGE) { |
1837 | ret = f2fs_insert_range(inode, offset, len); |
1838 | } else { |
1839 | ret = f2fs_expand_inode_data(inode, offset, len, mode); |
1840 | } |
1841 | |
1842 | if (!ret) { |
1843 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
1844 | f2fs_mark_inode_dirty_sync(inode, sync: false); |
1845 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
1846 | } |
1847 | |
1848 | out: |
1849 | inode_unlock(inode); |
1850 | |
1851 | trace_f2fs_fallocate(inode, mode, offset, len, ret); |
1852 | return ret; |
1853 | } |
1854 | |
1855 | static int f2fs_release_file(struct inode *inode, struct file *filp) |
1856 | { |
1857 | /* |
1858 | * f2fs_release_file is called at every close calls. So we should |
1859 | * not drop any inmemory pages by close called by other process. |
1860 | */ |
1861 | if (!(filp->f_mode & FMODE_WRITE) || |
1862 | atomic_read(v: &inode->i_writecount) != 1) |
1863 | return 0; |
1864 | |
1865 | inode_lock(inode); |
1866 | f2fs_abort_atomic_write(inode, clean: true); |
1867 | inode_unlock(inode); |
1868 | |
1869 | return 0; |
1870 | } |
1871 | |
1872 | static int f2fs_file_flush(struct file *file, fl_owner_t id) |
1873 | { |
1874 | struct inode *inode = file_inode(f: file); |
1875 | |
1876 | /* |
1877 | * If the process doing a transaction is crashed, we should do |
1878 | * roll-back. Otherwise, other reader/write can see corrupted database |
1879 | * until all the writers close its file. Since this should be done |
1880 | * before dropping file lock, it needs to do in ->flush. |
1881 | */ |
1882 | if (F2FS_I(inode)->atomic_write_task == current && |
1883 | (current->flags & PF_EXITING)) { |
1884 | inode_lock(inode); |
1885 | f2fs_abort_atomic_write(inode, clean: true); |
1886 | inode_unlock(inode); |
1887 | } |
1888 | |
1889 | return 0; |
1890 | } |
1891 | |
1892 | static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) |
1893 | { |
1894 | struct f2fs_inode_info *fi = F2FS_I(inode); |
1895 | u32 masked_flags = fi->i_flags & mask; |
1896 | |
1897 | /* mask can be shrunk by flags_valid selector */ |
1898 | iflags &= mask; |
1899 | |
/* Is it a quota file? Do not allow the user to mess with it */
1901 | if (IS_NOQUOTA(inode)) |
1902 | return -EPERM; |
1903 | |
1904 | if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { |
1905 | if (!f2fs_sb_has_casefold(sbi: F2FS_I_SB(inode))) |
1906 | return -EOPNOTSUPP; |
1907 | if (!f2fs_empty_dir(dir: inode)) |
1908 | return -ENOTEMPTY; |
1909 | } |
1910 | |
1911 | if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) { |
1912 | if (!f2fs_sb_has_compression(sbi: F2FS_I_SB(inode))) |
1913 | return -EOPNOTSUPP; |
1914 | if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL)) |
1915 | return -EINVAL; |
1916 | } |
1917 | |
1918 | if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { |
1919 | if (masked_flags & F2FS_COMPR_FL) { |
1920 | if (!f2fs_disable_compressed_file(inode)) |
1921 | return -EINVAL; |
1922 | } else { |
1923 | /* try to convert inline_data to support compression */ |
1924 | int err = f2fs_convert_inline_inode(inode); |
1925 | if (err) |
1926 | return err; |
1927 | |
1928 | f2fs_down_write(sem: &F2FS_I(inode)->i_sem); |
1929 | if (!f2fs_may_compress(inode) || |
1930 | (S_ISREG(inode->i_mode) && |
1931 | F2FS_HAS_BLOCKS(inode))) { |
1932 | f2fs_up_write(sem: &F2FS_I(inode)->i_sem); |
1933 | return -EINVAL; |
1934 | } |
1935 | err = set_compress_context(inode); |
1936 | f2fs_up_write(sem: &F2FS_I(inode)->i_sem); |
1937 | |
1938 | if (err) |
1939 | return err; |
1940 | } |
1941 | } |
1942 | |
1943 | fi->i_flags = iflags | (fi->i_flags & ~mask); |
1944 | f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) && |
1945 | (fi->i_flags & F2FS_NOCOMP_FL)); |
1946 | |
1947 | if (fi->i_flags & F2FS_PROJINHERIT_FL) |
1948 | set_inode_flag(inode, flag: FI_PROJ_INHERIT); |
1949 | else |
1950 | clear_inode_flag(inode, flag: FI_PROJ_INHERIT); |
1951 | |
1952 | inode_set_ctime_current(inode); |
1953 | f2fs_set_inode_flags(inode); |
1954 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
1955 | return 0; |
1956 | } |
1957 | |
1958 | /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */ |
1959 | |
1960 | /* |
1961 | * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry |
1962 | * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to |
1963 | * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add |
1964 | * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. |
1965 | * |
1966 | * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and |
1967 | * FS_IOC_FSSETXATTR is done by the VFS. |
1968 | */ |
1969 | |
1970 | static const struct { |
1971 | u32 iflag; |
1972 | u32 fsflag; |
1973 | } f2fs_fsflags_map[] = { |
1974 | { F2FS_COMPR_FL, FS_COMPR_FL }, |
1975 | { F2FS_SYNC_FL, FS_SYNC_FL }, |
1976 | { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, |
1977 | { F2FS_APPEND_FL, FS_APPEND_FL }, |
1978 | { F2FS_NODUMP_FL, FS_NODUMP_FL }, |
1979 | { F2FS_NOATIME_FL, FS_NOATIME_FL }, |
1980 | { F2FS_NOCOMP_FL, FS_NOCOMP_FL }, |
1981 | { F2FS_INDEX_FL, FS_INDEX_FL }, |
1982 | { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, |
1983 | { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, |
1984 | { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, |
1985 | }; |
1986 | |
1987 | #define F2FS_GETTABLE_FS_FL ( \ |
1988 | FS_COMPR_FL | \ |
1989 | FS_SYNC_FL | \ |
1990 | FS_IMMUTABLE_FL | \ |
1991 | FS_APPEND_FL | \ |
1992 | FS_NODUMP_FL | \ |
1993 | FS_NOATIME_FL | \ |
1994 | FS_NOCOMP_FL | \ |
1995 | FS_INDEX_FL | \ |
1996 | FS_DIRSYNC_FL | \ |
1997 | FS_PROJINHERIT_FL | \ |
1998 | FS_ENCRYPT_FL | \ |
1999 | FS_INLINE_DATA_FL | \ |
2000 | FS_NOCOW_FL | \ |
2001 | FS_VERITY_FL | \ |
2002 | FS_CASEFOLD_FL) |
2003 | |
2004 | #define F2FS_SETTABLE_FS_FL ( \ |
2005 | FS_COMPR_FL | \ |
2006 | FS_SYNC_FL | \ |
2007 | FS_IMMUTABLE_FL | \ |
2008 | FS_APPEND_FL | \ |
2009 | FS_NODUMP_FL | \ |
2010 | FS_NOATIME_FL | \ |
2011 | FS_NOCOMP_FL | \ |
2012 | FS_DIRSYNC_FL | \ |
2013 | FS_PROJINHERIT_FL | \ |
2014 | FS_CASEFOLD_FL) |
2015 | |
2016 | /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ |
2017 | static inline u32 f2fs_iflags_to_fsflags(u32 iflags) |
2018 | { |
2019 | u32 fsflags = 0; |
2020 | int i; |
2021 | |
2022 | for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) |
2023 | if (iflags & f2fs_fsflags_map[i].iflag) |
2024 | fsflags |= f2fs_fsflags_map[i].fsflag; |
2025 | |
2026 | return fsflags; |
2027 | } |
2028 | |
2029 | /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ |
2030 | static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) |
2031 | { |
2032 | u32 iflags = 0; |
2033 | int i; |
2034 | |
2035 | for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) |
2036 | if (fsflags & f2fs_fsflags_map[i].fsflag) |
2037 | iflags |= f2fs_fsflags_map[i].iflag; |
2038 | |
2039 | return iflags; |
2040 | } |
2041 | |
2042 | static int f2fs_ioc_getversion(struct file *filp, unsigned long arg) |
2043 | { |
2044 | struct inode *inode = file_inode(f: filp); |
2045 | |
2046 | return put_user(inode->i_generation, (int __user *)arg); |
2047 | } |
2048 | |
2049 | static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) |
2050 | { |
2051 | struct inode *inode = file_inode(f: filp); |
2052 | struct mnt_idmap *idmap = file_mnt_idmap(file: filp); |
2053 | struct f2fs_inode_info *fi = F2FS_I(inode); |
2054 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2055 | struct inode *pinode; |
2056 | loff_t isize; |
2057 | int ret; |
2058 | |
2059 | if (!inode_owner_or_capable(idmap, inode)) |
2060 | return -EACCES; |
2061 | |
2062 | if (!S_ISREG(inode->i_mode)) |
2063 | return -EINVAL; |
2064 | |
2065 | if (filp->f_flags & O_DIRECT) |
2066 | return -EINVAL; |
2067 | |
2068 | ret = mnt_want_write_file(file: filp); |
2069 | if (ret) |
2070 | return ret; |
2071 | |
2072 | inode_lock(inode); |
2073 | |
2074 | if (!f2fs_disable_compressed_file(inode)) { |
2075 | ret = -EINVAL; |
2076 | goto out; |
2077 | } |
2078 | |
2079 | if (f2fs_is_atomic_file(inode)) |
2080 | goto out; |
2081 | |
2082 | ret = f2fs_convert_inline_inode(inode); |
2083 | if (ret) |
2084 | goto out; |
2085 | |
2086 | f2fs_down_write(sem: &fi->i_gc_rwsem[WRITE]); |
2087 | |
2088 | /* |
2089 | * Should wait end_io to count F2FS_WB_CP_DATA correctly by |
2090 | * f2fs_is_atomic_file. |
2091 | */ |
2092 | if (get_dirty_pages(inode)) |
2093 | f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u" , |
2094 | inode->i_ino, get_dirty_pages(inode)); |
2095 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, LLONG_MAX); |
2096 | if (ret) { |
2097 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
2098 | goto out; |
2099 | } |
2100 | |
2101 | /* Check if the inode already has a COW inode */ |
2102 | if (fi->cow_inode == NULL) { |
2103 | /* Create a COW inode for atomic write */ |
2104 | pinode = f2fs_iget(sb: inode->i_sb, ino: fi->i_pino); |
2105 | if (IS_ERR(ptr: pinode)) { |
2106 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
2107 | ret = PTR_ERR(ptr: pinode); |
2108 | goto out; |
2109 | } |
2110 | |
2111 | ret = f2fs_get_tmpfile(idmap, dir: pinode, new_inode: &fi->cow_inode); |
2112 | iput(pinode); |
2113 | if (ret) { |
2114 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
2115 | goto out; |
2116 | } |
2117 | |
2118 | set_inode_flag(inode: fi->cow_inode, flag: FI_COW_FILE); |
2119 | clear_inode_flag(inode: fi->cow_inode, flag: FI_INLINE_DATA); |
2120 | } else { |
2121 | /* Reuse the already created COW inode */ |
2122 | ret = f2fs_do_truncate_blocks(inode: fi->cow_inode, from: 0, lock: true); |
2123 | if (ret) { |
2124 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
2125 | goto out; |
2126 | } |
2127 | } |
2128 | |
2129 | f2fs_write_inode(inode, NULL); |
2130 | |
2131 | stat_inc_atomic_inode(inode); |
2132 | |
2133 | set_inode_flag(inode, flag: FI_ATOMIC_FILE); |
2134 | |
2135 | isize = i_size_read(inode); |
2136 | fi->original_i_size = isize; |
2137 | if (truncate) { |
2138 | set_inode_flag(inode, flag: FI_ATOMIC_REPLACE); |
2139 | truncate_inode_pages_final(inode->i_mapping); |
2140 | f2fs_i_size_write(inode, i_size: 0); |
2141 | isize = 0; |
2142 | } |
2143 | f2fs_i_size_write(inode: fi->cow_inode, i_size: isize); |
2144 | |
2145 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
2146 | |
2147 | f2fs_update_time(sbi, type: REQ_TIME); |
2148 | fi->atomic_write_task = current; |
2149 | stat_update_max_atomic_write(inode); |
2150 | fi->atomic_write_cnt = 0; |
2151 | out: |
2152 | inode_unlock(inode); |
2153 | mnt_drop_write_file(file: filp); |
2154 | return ret; |
2155 | } |
2156 | |
2157 | static int f2fs_ioc_commit_atomic_write(struct file *filp) |
2158 | { |
2159 | struct inode *inode = file_inode(f: filp); |
2160 | struct mnt_idmap *idmap = file_mnt_idmap(file: filp); |
2161 | int ret; |
2162 | |
2163 | if (!inode_owner_or_capable(idmap, inode)) |
2164 | return -EACCES; |
2165 | |
2166 | ret = mnt_want_write_file(file: filp); |
2167 | if (ret) |
2168 | return ret; |
2169 | |
2170 | f2fs_balance_fs(sbi: F2FS_I_SB(inode), need: true); |
2171 | |
2172 | inode_lock(inode); |
2173 | |
2174 | if (f2fs_is_atomic_file(inode)) { |
2175 | ret = f2fs_commit_atomic_write(inode); |
2176 | if (!ret) |
2177 | ret = f2fs_do_sync_file(file: filp, start: 0, LLONG_MAX, datasync: 0, atomic: true); |
2178 | |
2179 | f2fs_abort_atomic_write(inode, clean: ret); |
2180 | } else { |
2181 | ret = f2fs_do_sync_file(file: filp, start: 0, LLONG_MAX, datasync: 1, atomic: false); |
2182 | } |
2183 | |
2184 | inode_unlock(inode); |
2185 | mnt_drop_write_file(file: filp); |
2186 | return ret; |
2187 | } |
2188 | |
2189 | static int f2fs_ioc_abort_atomic_write(struct file *filp) |
2190 | { |
2191 | struct inode *inode = file_inode(f: filp); |
2192 | struct mnt_idmap *idmap = file_mnt_idmap(file: filp); |
2193 | int ret; |
2194 | |
2195 | if (!inode_owner_or_capable(idmap, inode)) |
2196 | return -EACCES; |
2197 | |
2198 | ret = mnt_want_write_file(file: filp); |
2199 | if (ret) |
2200 | return ret; |
2201 | |
2202 | inode_lock(inode); |
2203 | |
2204 | f2fs_abort_atomic_write(inode, clean: true); |
2205 | |
2206 | inode_unlock(inode); |
2207 | |
2208 | mnt_drop_write_file(file: filp); |
2209 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
2210 | return ret; |
2211 | } |
2212 | |
2213 | static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) |
2214 | { |
2215 | struct inode *inode = file_inode(f: filp); |
2216 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2217 | struct super_block *sb = sbi->sb; |
2218 | __u32 in; |
2219 | int ret = 0; |
2220 | |
2221 | if (!capable(CAP_SYS_ADMIN)) |
2222 | return -EPERM; |
2223 | |
2224 | if (get_user(in, (__u32 __user *)arg)) |
2225 | return -EFAULT; |
2226 | |
2227 | if (in != F2FS_GOING_DOWN_FULLSYNC) { |
2228 | ret = mnt_want_write_file(file: filp); |
2229 | if (ret) { |
2230 | if (ret == -EROFS) { |
2231 | ret = 0; |
2232 | f2fs_stop_checkpoint(sbi, end_io: false, |
2233 | reason: STOP_CP_REASON_SHUTDOWN); |
2234 | trace_f2fs_shutdown(sbi, mode: in, ret); |
2235 | } |
2236 | return ret; |
2237 | } |
2238 | } |
2239 | |
2240 | switch (in) { |
2241 | case F2FS_GOING_DOWN_FULLSYNC: |
2242 | ret = freeze_bdev(bdev: sb->s_bdev); |
2243 | if (ret) |
2244 | goto out; |
2245 | f2fs_stop_checkpoint(sbi, end_io: false, reason: STOP_CP_REASON_SHUTDOWN); |
2246 | thaw_bdev(bdev: sb->s_bdev); |
2247 | break; |
2248 | case F2FS_GOING_DOWN_METASYNC: |
2249 | /* do checkpoint only */ |
2250 | ret = f2fs_sync_fs(sb, sync: 1); |
2251 | if (ret) |
2252 | goto out; |
2253 | f2fs_stop_checkpoint(sbi, end_io: false, reason: STOP_CP_REASON_SHUTDOWN); |
2254 | break; |
2255 | case F2FS_GOING_DOWN_NOSYNC: |
2256 | f2fs_stop_checkpoint(sbi, end_io: false, reason: STOP_CP_REASON_SHUTDOWN); |
2257 | break; |
2258 | case F2FS_GOING_DOWN_METAFLUSH: |
2259 | f2fs_sync_meta_pages(sbi, type: META, LONG_MAX, io_type: FS_META_IO); |
2260 | f2fs_stop_checkpoint(sbi, end_io: false, reason: STOP_CP_REASON_SHUTDOWN); |
2261 | break; |
2262 | case F2FS_GOING_DOWN_NEED_FSCK: |
2263 | set_sbi_flag(sbi, type: SBI_NEED_FSCK); |
2264 | set_sbi_flag(sbi, type: SBI_CP_DISABLED_QUICK); |
2265 | set_sbi_flag(sbi, type: SBI_IS_DIRTY); |
2266 | /* do checkpoint only */ |
2267 | ret = f2fs_sync_fs(sb, sync: 1); |
2268 | goto out; |
2269 | default: |
2270 | ret = -EINVAL; |
2271 | goto out; |
2272 | } |
2273 | |
2274 | f2fs_stop_gc_thread(sbi); |
2275 | f2fs_stop_discard_thread(sbi); |
2276 | |
2277 | f2fs_drop_discard_cmd(sbi); |
2278 | clear_opt(sbi, DISCARD); |
2279 | |
2280 | f2fs_update_time(sbi, type: REQ_TIME); |
2281 | out: |
2282 | if (in != F2FS_GOING_DOWN_FULLSYNC) |
2283 | mnt_drop_write_file(file: filp); |
2284 | |
2285 | trace_f2fs_shutdown(sbi, mode: in, ret); |
2286 | |
2287 | return ret; |
2288 | } |
2289 | |
2290 | static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) |
2291 | { |
2292 | struct inode *inode = file_inode(f: filp); |
2293 | struct super_block *sb = inode->i_sb; |
2294 | struct fstrim_range range; |
2295 | int ret; |
2296 | |
2297 | if (!capable(CAP_SYS_ADMIN)) |
2298 | return -EPERM; |
2299 | |
2300 | if (!f2fs_hw_support_discard(sbi: F2FS_SB(sb))) |
2301 | return -EOPNOTSUPP; |
2302 | |
2303 | if (copy_from_user(to: &range, from: (struct fstrim_range __user *)arg, |
2304 | n: sizeof(range))) |
2305 | return -EFAULT; |
2306 | |
2307 | ret = mnt_want_write_file(file: filp); |
2308 | if (ret) |
2309 | return ret; |
2310 | |
2311 | range.minlen = max((unsigned int)range.minlen, |
2312 | bdev_discard_granularity(sb->s_bdev)); |
2313 | ret = f2fs_trim_fs(sbi: F2FS_SB(sb), range: &range); |
2314 | mnt_drop_write_file(file: filp); |
2315 | if (ret < 0) |
2316 | return ret; |
2317 | |
2318 | if (copy_to_user(to: (struct fstrim_range __user *)arg, from: &range, |
2319 | n: sizeof(range))) |
2320 | return -EFAULT; |
2321 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
2322 | return 0; |
2323 | } |
2324 | |
2325 | static bool uuid_is_nonzero(__u8 u[16]) |
2326 | { |
2327 | int i; |
2328 | |
2329 | for (i = 0; i < 16; i++) |
2330 | if (u[i]) |
2331 | return true; |
2332 | return false; |
2333 | } |
2334 | |
2335 | static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) |
2336 | { |
2337 | struct inode *inode = file_inode(f: filp); |
2338 | |
2339 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode))) |
2340 | return -EOPNOTSUPP; |
2341 | |
2342 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
2343 | |
2344 | return fscrypt_ioctl_set_policy(filp, arg: (const void __user *)arg); |
2345 | } |
2346 | |
2347 | static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) |
2348 | { |
2349 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2350 | return -EOPNOTSUPP; |
2351 | return fscrypt_ioctl_get_policy(filp, arg: (void __user *)arg); |
2352 | } |
2353 | |
2354 | static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) |
2355 | { |
2356 | struct inode *inode = file_inode(f: filp); |
2357 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2358 | u8 encrypt_pw_salt[16]; |
2359 | int err; |
2360 | |
2361 | if (!f2fs_sb_has_encrypt(sbi)) |
2362 | return -EOPNOTSUPP; |
2363 | |
2364 | err = mnt_want_write_file(file: filp); |
2365 | if (err) |
2366 | return err; |
2367 | |
2368 | f2fs_down_write(sem: &sbi->sb_lock); |
2369 | |
2370 | if (uuid_is_nonzero(u: sbi->raw_super->encrypt_pw_salt)) |
2371 | goto got_it; |
2372 | |
2373 | /* update superblock with uuid */ |
2374 | generate_random_uuid(uuid: sbi->raw_super->encrypt_pw_salt); |
2375 | |
2376 | err = f2fs_commit_super(sbi, recover: false); |
2377 | if (err) { |
2378 | /* undo new data */ |
2379 | memset(sbi->raw_super->encrypt_pw_salt, 0, 16); |
2380 | goto out_err; |
2381 | } |
2382 | got_it: |
2383 | memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16); |
2384 | out_err: |
2385 | f2fs_up_write(sem: &sbi->sb_lock); |
2386 | mnt_drop_write_file(file: filp); |
2387 | |
2388 | if (!err && copy_to_user(to: (__u8 __user *)arg, from: encrypt_pw_salt, n: 16)) |
2389 | err = -EFAULT; |
2390 | |
2391 | return err; |
2392 | } |
2393 | |
2394 | static int f2fs_ioc_get_encryption_policy_ex(struct file *filp, |
2395 | unsigned long arg) |
2396 | { |
2397 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2398 | return -EOPNOTSUPP; |
2399 | |
2400 | return fscrypt_ioctl_get_policy_ex(filp, arg: (void __user *)arg); |
2401 | } |
2402 | |
2403 | static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg) |
2404 | { |
2405 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2406 | return -EOPNOTSUPP; |
2407 | |
2408 | return fscrypt_ioctl_add_key(filp, arg: (void __user *)arg); |
2409 | } |
2410 | |
2411 | static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg) |
2412 | { |
2413 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2414 | return -EOPNOTSUPP; |
2415 | |
2416 | return fscrypt_ioctl_remove_key(filp, arg: (void __user *)arg); |
2417 | } |
2418 | |
2419 | static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp, |
2420 | unsigned long arg) |
2421 | { |
2422 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2423 | return -EOPNOTSUPP; |
2424 | |
2425 | return fscrypt_ioctl_remove_key_all_users(filp, arg: (void __user *)arg); |
2426 | } |
2427 | |
2428 | static int f2fs_ioc_get_encryption_key_status(struct file *filp, |
2429 | unsigned long arg) |
2430 | { |
2431 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2432 | return -EOPNOTSUPP; |
2433 | |
2434 | return fscrypt_ioctl_get_key_status(filp, arg: (void __user *)arg); |
2435 | } |
2436 | |
2437 | static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) |
2438 | { |
2439 | if (!f2fs_sb_has_encrypt(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
2440 | return -EOPNOTSUPP; |
2441 | |
2442 | return fscrypt_ioctl_get_nonce(filp, arg: (void __user *)arg); |
2443 | } |
2444 | |
2445 | static int f2fs_ioc_gc(struct file *filp, unsigned long arg) |
2446 | { |
2447 | struct inode *inode = file_inode(f: filp); |
2448 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2449 | struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO, |
2450 | .no_bg_gc = false, |
2451 | .should_migrate_blocks = false, |
2452 | .nr_free_secs = 0 }; |
2453 | __u32 sync; |
2454 | int ret; |
2455 | |
2456 | if (!capable(CAP_SYS_ADMIN)) |
2457 | return -EPERM; |
2458 | |
2459 | if (get_user(sync, (__u32 __user *)arg)) |
2460 | return -EFAULT; |
2461 | |
2462 | if (f2fs_readonly(sb: sbi->sb)) |
2463 | return -EROFS; |
2464 | |
2465 | ret = mnt_want_write_file(file: filp); |
2466 | if (ret) |
2467 | return ret; |
2468 | |
2469 | if (!sync) { |
2470 | if (!f2fs_down_write_trylock(sem: &sbi->gc_lock)) { |
2471 | ret = -EBUSY; |
2472 | goto out; |
2473 | } |
2474 | } else { |
2475 | f2fs_down_write(sem: &sbi->gc_lock); |
2476 | } |
2477 | |
2478 | gc_control.init_gc_type = sync ? FG_GC : BG_GC; |
2479 | gc_control.err_gc_skipped = sync; |
2480 | stat_inc_gc_call_count(sbi, FOREGROUND); |
2481 | ret = f2fs_gc(sbi, gc_control: &gc_control); |
2482 | out: |
2483 | mnt_drop_write_file(file: filp); |
2484 | return ret; |
2485 | } |
2486 | |
2487 | static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range) |
2488 | { |
2489 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: file_inode(f: filp)); |
2490 | struct f2fs_gc_control gc_control = { |
2491 | .init_gc_type = range->sync ? FG_GC : BG_GC, |
2492 | .no_bg_gc = false, |
2493 | .should_migrate_blocks = false, |
2494 | .err_gc_skipped = range->sync, |
2495 | .nr_free_secs = 0 }; |
2496 | u64 end; |
2497 | int ret; |
2498 | |
2499 | if (!capable(CAP_SYS_ADMIN)) |
2500 | return -EPERM; |
2501 | if (f2fs_readonly(sb: sbi->sb)) |
2502 | return -EROFS; |
2503 | |
2504 | end = range->start + range->len; |
2505 | if (end < range->start || range->start < MAIN_BLKADDR(sbi) || |
2506 | end >= MAX_BLKADDR(sbi)) |
2507 | return -EINVAL; |
2508 | |
2509 | ret = mnt_want_write_file(file: filp); |
2510 | if (ret) |
2511 | return ret; |
2512 | |
2513 | do_more: |
2514 | if (!range->sync) { |
2515 | if (!f2fs_down_write_trylock(sem: &sbi->gc_lock)) { |
2516 | ret = -EBUSY; |
2517 | goto out; |
2518 | } |
2519 | } else { |
2520 | f2fs_down_write(sem: &sbi->gc_lock); |
2521 | } |
2522 | |
2523 | gc_control.victim_segno = GET_SEGNO(sbi, range->start); |
2524 | stat_inc_gc_call_count(sbi, FOREGROUND); |
2525 | ret = f2fs_gc(sbi, gc_control: &gc_control); |
2526 | if (ret) { |
2527 | if (ret == -EBUSY) |
2528 | ret = -EAGAIN; |
2529 | goto out; |
2530 | } |
2531 | range->start += CAP_BLKS_PER_SEC(sbi); |
2532 | if (range->start <= end) |
2533 | goto do_more; |
2534 | out: |
2535 | mnt_drop_write_file(file: filp); |
2536 | return ret; |
2537 | } |
2538 | |
2539 | static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) |
2540 | { |
2541 | struct f2fs_gc_range range; |
2542 | |
2543 | if (copy_from_user(to: &range, from: (struct f2fs_gc_range __user *)arg, |
2544 | n: sizeof(range))) |
2545 | return -EFAULT; |
2546 | return __f2fs_ioc_gc_range(filp, range: &range); |
2547 | } |
2548 | |
2549 | static int f2fs_ioc_write_checkpoint(struct file *filp) |
2550 | { |
2551 | struct inode *inode = file_inode(f: filp); |
2552 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2553 | int ret; |
2554 | |
2555 | if (!capable(CAP_SYS_ADMIN)) |
2556 | return -EPERM; |
2557 | |
2558 | if (f2fs_readonly(sb: sbi->sb)) |
2559 | return -EROFS; |
2560 | |
2561 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { |
2562 | f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled." ); |
2563 | return -EINVAL; |
2564 | } |
2565 | |
2566 | ret = mnt_want_write_file(file: filp); |
2567 | if (ret) |
2568 | return ret; |
2569 | |
2570 | ret = f2fs_sync_fs(sb: sbi->sb, sync: 1); |
2571 | |
2572 | mnt_drop_write_file(file: filp); |
2573 | return ret; |
2574 | } |
2575 | |
2576 | static int f2fs_defragment_range(struct f2fs_sb_info *sbi, |
2577 | struct file *filp, |
2578 | struct f2fs_defragment *range) |
2579 | { |
2580 | struct inode *inode = file_inode(f: filp); |
2581 | struct f2fs_map_blocks map = { .m_next_extent = NULL, |
2582 | .m_seg_type = NO_CHECK_TYPE, |
2583 | .m_may_create = false }; |
2584 | struct extent_info ei = {}; |
2585 | pgoff_t pg_start, pg_end, next_pgofs; |
2586 | unsigned int blk_per_seg = sbi->blocks_per_seg; |
2587 | unsigned int total = 0, sec_num; |
2588 | block_t blk_end = 0; |
2589 | bool fragmented = false; |
2590 | int err; |
2591 | |
2592 | pg_start = range->start >> PAGE_SHIFT; |
2593 | pg_end = (range->start + range->len) >> PAGE_SHIFT; |
2594 | |
2595 | f2fs_balance_fs(sbi, need: true); |
2596 | |
2597 | inode_lock(inode); |
2598 | |
2599 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) { |
2600 | err = -EINVAL; |
2601 | goto unlock_out; |
2602 | } |
2603 | |
2604 | /* if in-place-update policy is enabled, don't waste time here */ |
2605 | set_inode_flag(inode, flag: FI_OPU_WRITE); |
2606 | if (f2fs_should_update_inplace(inode, NULL)) { |
2607 | err = -EINVAL; |
2608 | goto out; |
2609 | } |
2610 | |
2611 | /* writeback all dirty pages in the range */ |
2612 | err = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: range->start, |
2613 | lend: range->start + range->len - 1); |
2614 | if (err) |
2615 | goto out; |
2616 | |
2617 | /* |
2618 | * lookup mapping info in extent cache, skip defragmenting if physical |
2619 | * block addresses are continuous. |
2620 | */ |
2621 | if (f2fs_lookup_read_extent_cache(inode, pgofs: pg_start, ei: &ei)) { |
2622 | if (ei.fofs + ei.len >= pg_end) |
2623 | goto out; |
2624 | } |
2625 | |
2626 | map.m_lblk = pg_start; |
2627 | map.m_next_pgofs = &next_pgofs; |
2628 | |
2629 | /* |
2630 | * lookup mapping info in dnode page cache, skip defragmenting if all |
2631 | * physical block addresses are continuous even if there are hole(s) |
2632 | * in logical blocks. |
2633 | */ |
2634 | while (map.m_lblk < pg_end) { |
2635 | map.m_len = pg_end - map.m_lblk; |
2636 | err = f2fs_map_blocks(inode, map: &map, flag: F2FS_GET_BLOCK_DEFAULT); |
2637 | if (err) |
2638 | goto out; |
2639 | |
2640 | if (!(map.m_flags & F2FS_MAP_FLAGS)) { |
2641 | map.m_lblk = next_pgofs; |
2642 | continue; |
2643 | } |
2644 | |
2645 | if (blk_end && blk_end != map.m_pblk) |
2646 | fragmented = true; |
2647 | |
/* record the total count of blocks that we're going to move */
2649 | total += map.m_len; |
2650 | |
2651 | blk_end = map.m_pblk + map.m_len; |
2652 | |
2653 | map.m_lblk += map.m_len; |
2654 | } |
2655 | |
2656 | if (!fragmented) { |
2657 | total = 0; |
2658 | goto out; |
2659 | } |
2660 | |
2661 | sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi)); |
2662 | |
2663 | /* |
2664 | * make sure there are enough free section for LFS allocation, this can |
2665 | * avoid defragment running in SSR mode when free section are allocated |
2666 | * intensively |
2667 | */ |
2668 | if (has_not_enough_free_secs(sbi, freed: 0, needed: sec_num)) { |
2669 | err = -EAGAIN; |
2670 | goto out; |
2671 | } |
2672 | |
2673 | map.m_lblk = pg_start; |
2674 | map.m_len = pg_end - pg_start; |
2675 | total = 0; |
2676 | |
2677 | while (map.m_lblk < pg_end) { |
2678 | pgoff_t idx; |
2679 | int cnt = 0; |
2680 | |
2681 | do_map: |
2682 | map.m_len = pg_end - map.m_lblk; |
2683 | err = f2fs_map_blocks(inode, map: &map, flag: F2FS_GET_BLOCK_DEFAULT); |
2684 | if (err) |
2685 | goto clear_out; |
2686 | |
2687 | if (!(map.m_flags & F2FS_MAP_FLAGS)) { |
2688 | map.m_lblk = next_pgofs; |
2689 | goto check; |
2690 | } |
2691 | |
2692 | set_inode_flag(inode, flag: FI_SKIP_WRITES); |
2693 | |
2694 | idx = map.m_lblk; |
2695 | while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { |
2696 | struct page *page; |
2697 | |
2698 | page = f2fs_get_lock_data_page(inode, index: idx, for_write: true); |
2699 | if (IS_ERR(ptr: page)) { |
2700 | err = PTR_ERR(ptr: page); |
2701 | goto clear_out; |
2702 | } |
2703 | |
2704 | set_page_dirty(page); |
2705 | set_page_private_gcing(page); |
2706 | f2fs_put_page(page, unlock: 1); |
2707 | |
2708 | idx++; |
2709 | cnt++; |
2710 | total++; |
2711 | } |
2712 | |
2713 | map.m_lblk = idx; |
2714 | check: |
2715 | if (map.m_lblk < pg_end && cnt < blk_per_seg) |
2716 | goto do_map; |
2717 | |
2718 | clear_inode_flag(inode, flag: FI_SKIP_WRITES); |
2719 | |
2720 | err = filemap_fdatawrite(inode->i_mapping); |
2721 | if (err) |
2722 | goto out; |
2723 | } |
2724 | clear_out: |
2725 | clear_inode_flag(inode, flag: FI_SKIP_WRITES); |
2726 | out: |
2727 | clear_inode_flag(inode, flag: FI_OPU_WRITE); |
2728 | unlock_out: |
2729 | inode_unlock(inode); |
2730 | if (!err) |
2731 | range->len = (u64)total << PAGE_SHIFT; |
2732 | return err; |
2733 | } |
2734 | |
2735 | static int f2fs_ioc_defragment(struct file *filp, unsigned long arg) |
2736 | { |
2737 | struct inode *inode = file_inode(f: filp); |
2738 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2739 | struct f2fs_defragment range; |
2740 | int err; |
2741 | |
2742 | if (!capable(CAP_SYS_ADMIN)) |
2743 | return -EPERM; |
2744 | |
2745 | if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode)) |
2746 | return -EINVAL; |
2747 | |
2748 | if (f2fs_readonly(sb: sbi->sb)) |
2749 | return -EROFS; |
2750 | |
2751 | if (copy_from_user(to: &range, from: (struct f2fs_defragment __user *)arg, |
2752 | n: sizeof(range))) |
2753 | return -EFAULT; |
2754 | |
2755 | /* verify alignment of offset & size */ |
2756 | if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) |
2757 | return -EINVAL; |
2758 | |
2759 | if (unlikely((range.start + range.len) >> PAGE_SHIFT > |
2760 | max_file_blocks(inode))) |
2761 | return -EINVAL; |
2762 | |
2763 | err = mnt_want_write_file(file: filp); |
2764 | if (err) |
2765 | return err; |
2766 | |
2767 | err = f2fs_defragment_range(sbi, filp, range: &range); |
2768 | mnt_drop_write_file(file: filp); |
2769 | |
2770 | f2fs_update_time(sbi, type: REQ_TIME); |
2771 | if (err < 0) |
2772 | return err; |
2773 | |
2774 | if (copy_to_user(to: (struct f2fs_defragment __user *)arg, from: &range, |
2775 | n: sizeof(range))) |
2776 | return -EFAULT; |
2777 | |
2778 | return 0; |
2779 | } |
2780 | |
2781 | static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, |
2782 | struct file *file_out, loff_t pos_out, size_t len) |
2783 | { |
2784 | struct inode *src = file_inode(f: file_in); |
2785 | struct inode *dst = file_inode(f: file_out); |
2786 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: src); |
2787 | size_t olen = len, dst_max_i_size = 0; |
2788 | size_t dst_osize; |
2789 | int ret; |
2790 | |
2791 | if (file_in->f_path.mnt != file_out->f_path.mnt || |
2792 | src->i_sb != dst->i_sb) |
2793 | return -EXDEV; |
2794 | |
2795 | if (unlikely(f2fs_readonly(src->i_sb))) |
2796 | return -EROFS; |
2797 | |
2798 | if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) |
2799 | return -EINVAL; |
2800 | |
2801 | if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) |
2802 | return -EOPNOTSUPP; |
2803 | |
2804 | if (pos_out < 0 || pos_in < 0) |
2805 | return -EINVAL; |
2806 | |
2807 | if (src == dst) { |
2808 | if (pos_in == pos_out) |
2809 | return 0; |
2810 | if (pos_out > pos_in && pos_out < pos_in + len) |
2811 | return -EINVAL; |
2812 | } |
2813 | |
2814 | inode_lock(inode: src); |
2815 | if (src != dst) { |
2816 | ret = -EBUSY; |
2817 | if (!inode_trylock(inode: dst)) |
2818 | goto out; |
2819 | } |
2820 | |
2821 | ret = -EINVAL; |
2822 | if (pos_in + len > src->i_size || pos_in + len < pos_in) |
2823 | goto out_unlock; |
2824 | if (len == 0) |
2825 | olen = len = src->i_size - pos_in; |
2826 | if (pos_in + len == src->i_size) |
2827 | len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in; |
2828 | if (len == 0) { |
2829 | ret = 0; |
2830 | goto out_unlock; |
2831 | } |
2832 | |
2833 | dst_osize = dst->i_size; |
2834 | if (pos_out + olen > dst->i_size) |
2835 | dst_max_i_size = pos_out + olen; |
2836 | |
2837 | /* verify the end result is block aligned */ |
2838 | if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || |
2839 | !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) || |
2840 | !IS_ALIGNED(pos_out, F2FS_BLKSIZE)) |
2841 | goto out_unlock; |
2842 | |
2843 | ret = f2fs_convert_inline_inode(inode: src); |
2844 | if (ret) |
2845 | goto out_unlock; |
2846 | |
2847 | ret = f2fs_convert_inline_inode(inode: dst); |
2848 | if (ret) |
2849 | goto out_unlock; |
2850 | |
2851 | /* write out all dirty pages from offset */ |
2852 | ret = filemap_write_and_wait_range(mapping: src->i_mapping, |
2853 | lstart: pos_in, lend: pos_in + len); |
2854 | if (ret) |
2855 | goto out_unlock; |
2856 | |
2857 | ret = filemap_write_and_wait_range(mapping: dst->i_mapping, |
2858 | lstart: pos_out, lend: pos_out + len); |
2859 | if (ret) |
2860 | goto out_unlock; |
2861 | |
2862 | f2fs_balance_fs(sbi, need: true); |
2863 | |
2864 | f2fs_down_write(sem: &F2FS_I(inode: src)->i_gc_rwsem[WRITE]); |
2865 | if (src != dst) { |
2866 | ret = -EBUSY; |
2867 | if (!f2fs_down_write_trylock(sem: &F2FS_I(inode: dst)->i_gc_rwsem[WRITE])) |
2868 | goto out_src; |
2869 | } |
2870 | |
2871 | f2fs_lock_op(sbi); |
2872 | ret = __exchange_data_block(src_inode: src, dst_inode: dst, src: pos_in >> F2FS_BLKSIZE_BITS, |
2873 | dst: pos_out >> F2FS_BLKSIZE_BITS, |
2874 | len: len >> F2FS_BLKSIZE_BITS, full: false); |
2875 | |
2876 | if (!ret) { |
2877 | if (dst_max_i_size) |
2878 | f2fs_i_size_write(inode: dst, i_size: dst_max_i_size); |
2879 | else if (dst_osize != dst->i_size) |
2880 | f2fs_i_size_write(inode: dst, i_size: dst_osize); |
2881 | } |
2882 | f2fs_unlock_op(sbi); |
2883 | |
2884 | if (src != dst) |
2885 | f2fs_up_write(sem: &F2FS_I(inode: dst)->i_gc_rwsem[WRITE]); |
2886 | out_src: |
2887 | f2fs_up_write(sem: &F2FS_I(inode: src)->i_gc_rwsem[WRITE]); |
2888 | if (ret) |
2889 | goto out_unlock; |
2890 | |
2891 | inode_set_mtime_to_ts(inode: src, ts: inode_set_ctime_current(inode: src)); |
2892 | f2fs_mark_inode_dirty_sync(inode: src, sync: false); |
2893 | if (src != dst) { |
2894 | inode_set_mtime_to_ts(inode: dst, ts: inode_set_ctime_current(inode: dst)); |
2895 | f2fs_mark_inode_dirty_sync(inode: dst, sync: false); |
2896 | } |
2897 | f2fs_update_time(sbi, type: REQ_TIME); |
2898 | |
2899 | out_unlock: |
2900 | if (src != dst) |
2901 | inode_unlock(inode: dst); |
2902 | out: |
2903 | inode_unlock(inode: src); |
2904 | return ret; |
2905 | } |
2906 | |
2907 | static int __f2fs_ioc_move_range(struct file *filp, |
2908 | struct f2fs_move_range *range) |
2909 | { |
2910 | struct fd dst; |
2911 | int err; |
2912 | |
2913 | if (!(filp->f_mode & FMODE_READ) || |
2914 | !(filp->f_mode & FMODE_WRITE)) |
2915 | return -EBADF; |
2916 | |
2917 | dst = fdget(fd: range->dst_fd); |
2918 | if (!dst.file) |
2919 | return -EBADF; |
2920 | |
2921 | if (!(dst.file->f_mode & FMODE_WRITE)) { |
2922 | err = -EBADF; |
2923 | goto err_out; |
2924 | } |
2925 | |
2926 | err = mnt_want_write_file(file: filp); |
2927 | if (err) |
2928 | goto err_out; |
2929 | |
2930 | err = f2fs_move_file_range(file_in: filp, pos_in: range->pos_in, file_out: dst.file, |
2931 | pos_out: range->pos_out, len: range->len); |
2932 | |
2933 | mnt_drop_write_file(file: filp); |
2934 | err_out: |
2935 | fdput(fd: dst); |
2936 | return err; |
2937 | } |
2938 | |
2939 | static int f2fs_ioc_move_range(struct file *filp, unsigned long arg) |
2940 | { |
2941 | struct f2fs_move_range range; |
2942 | |
2943 | if (copy_from_user(to: &range, from: (struct f2fs_move_range __user *)arg, |
2944 | n: sizeof(range))) |
2945 | return -EFAULT; |
2946 | return __f2fs_ioc_move_range(filp, range: &range); |
2947 | } |
2948 | |
2949 | static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) |
2950 | { |
2951 | struct inode *inode = file_inode(f: filp); |
2952 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2953 | struct sit_info *sm = SIT_I(sbi); |
2954 | unsigned int start_segno = 0, end_segno = 0; |
2955 | unsigned int dev_start_segno = 0, dev_end_segno = 0; |
2956 | struct f2fs_flush_device range; |
2957 | struct f2fs_gc_control gc_control = { |
2958 | .init_gc_type = FG_GC, |
2959 | .should_migrate_blocks = true, |
2960 | .err_gc_skipped = true, |
2961 | .nr_free_secs = 0 }; |
2962 | int ret; |
2963 | |
2964 | if (!capable(CAP_SYS_ADMIN)) |
2965 | return -EPERM; |
2966 | |
2967 | if (f2fs_readonly(sb: sbi->sb)) |
2968 | return -EROFS; |
2969 | |
2970 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) |
2971 | return -EINVAL; |
2972 | |
2973 | if (copy_from_user(to: &range, from: (struct f2fs_flush_device __user *)arg, |
2974 | n: sizeof(range))) |
2975 | return -EFAULT; |
2976 | |
2977 | if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || |
2978 | __is_large_section(sbi)) { |
2979 | f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1" , |
2980 | range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); |
2981 | return -EINVAL; |
2982 | } |
2983 | |
2984 | ret = mnt_want_write_file(file: filp); |
2985 | if (ret) |
2986 | return ret; |
2987 | |
2988 | if (range.dev_num != 0) |
2989 | dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk); |
2990 | dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk); |
2991 | |
2992 | start_segno = sm->last_victim[FLUSH_DEVICE]; |
2993 | if (start_segno < dev_start_segno || start_segno >= dev_end_segno) |
2994 | start_segno = dev_start_segno; |
2995 | end_segno = min(start_segno + range.segments, dev_end_segno); |
2996 | |
2997 | while (start_segno < end_segno) { |
2998 | if (!f2fs_down_write_trylock(sem: &sbi->gc_lock)) { |
2999 | ret = -EBUSY; |
3000 | goto out; |
3001 | } |
3002 | sm->last_victim[GC_CB] = end_segno + 1; |
3003 | sm->last_victim[GC_GREEDY] = end_segno + 1; |
3004 | sm->last_victim[ALLOC_NEXT] = end_segno + 1; |
3005 | |
3006 | gc_control.victim_segno = start_segno; |
3007 | stat_inc_gc_call_count(sbi, FOREGROUND); |
3008 | ret = f2fs_gc(sbi, gc_control: &gc_control); |
3009 | if (ret == -EAGAIN) |
3010 | ret = 0; |
3011 | else if (ret < 0) |
3012 | break; |
3013 | start_segno++; |
3014 | } |
3015 | out: |
3016 | mnt_drop_write_file(file: filp); |
3017 | return ret; |
3018 | } |
3019 | |
3020 | static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) |
3021 | { |
3022 | struct inode *inode = file_inode(f: filp); |
3023 | u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); |
3024 | |
/* Always advertise atomic write support; SQLite on Android relies on it. */
3026 | sb_feature |= F2FS_FEATURE_ATOMIC_WRITE; |
3027 | |
3028 | return put_user(sb_feature, (u32 __user *)arg); |
3029 | } |
3030 | |
3031 | #ifdef CONFIG_QUOTA |
3032 | int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) |
3033 | { |
3034 | struct dquot *transfer_to[MAXQUOTAS] = {}; |
3035 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3036 | struct super_block *sb = sbi->sb; |
3037 | int err; |
3038 | |
3039 | transfer_to[PRJQUOTA] = dqget(sb, qid: make_kqid_projid(projid: kprojid)); |
3040 | if (IS_ERR(ptr: transfer_to[PRJQUOTA])) |
3041 | return PTR_ERR(ptr: transfer_to[PRJQUOTA]); |
3042 | |
3043 | err = __dquot_transfer(inode, transfer_to); |
3044 | if (err) |
3045 | set_sbi_flag(sbi, type: SBI_QUOTA_NEED_REPAIR); |
3046 | dqput(dquot: transfer_to[PRJQUOTA]); |
3047 | return err; |
3048 | } |
3049 | |
3050 | static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) |
3051 | { |
3052 | struct f2fs_inode_info *fi = F2FS_I(inode); |
3053 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3054 | struct f2fs_inode *ri = NULL; |
3055 | kprojid_t kprojid; |
3056 | int err; |
3057 | |
3058 | if (!f2fs_sb_has_project_quota(sbi)) { |
3059 | if (projid != F2FS_DEF_PROJID) |
3060 | return -EOPNOTSUPP; |
3061 | else |
3062 | return 0; |
3063 | } |
3064 | |
3065 | if (!f2fs_has_extra_attr(inode)) |
3066 | return -EOPNOTSUPP; |
3067 | |
3068 | kprojid = make_kprojid(from: &init_user_ns, projid: (projid_t)projid); |
3069 | |
3070 | if (projid_eq(left: kprojid, right: fi->i_projid)) |
3071 | return 0; |
3072 | |
3073 | err = -EPERM; |
/* Is it a quota file? Do not allow the user to mess with it */
3075 | if (IS_NOQUOTA(inode)) |
3076 | return err; |
3077 | |
3078 | if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) |
3079 | return -EOVERFLOW; |
3080 | |
3081 | err = f2fs_dquot_initialize(inode); |
3082 | if (err) |
3083 | return err; |
3084 | |
3085 | f2fs_lock_op(sbi); |
3086 | err = f2fs_transfer_project_quota(inode, kprojid); |
3087 | if (err) |
3088 | goto out_unlock; |
3089 | |
3090 | fi->i_projid = kprojid; |
3091 | inode_set_ctime_current(inode); |
3092 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
3093 | out_unlock: |
3094 | f2fs_unlock_op(sbi); |
3095 | return err; |
3096 | } |
3097 | #else |
3098 | int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) |
3099 | { |
3100 | return 0; |
3101 | } |
3102 | |
3103 | static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) |
3104 | { |
3105 | if (projid != F2FS_DEF_PROJID) |
3106 | return -EOPNOTSUPP; |
3107 | return 0; |
3108 | } |
3109 | #endif |
3110 | |
3111 | int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) |
3112 | { |
3113 | struct inode *inode = d_inode(dentry); |
3114 | struct f2fs_inode_info *fi = F2FS_I(inode); |
3115 | u32 fsflags = f2fs_iflags_to_fsflags(iflags: fi->i_flags); |
3116 | |
3117 | if (IS_ENCRYPTED(inode)) |
3118 | fsflags |= FS_ENCRYPT_FL; |
3119 | if (IS_VERITY(inode)) |
3120 | fsflags |= FS_VERITY_FL; |
3121 | if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) |
3122 | fsflags |= FS_INLINE_DATA_FL; |
3123 | if (is_inode_flag_set(inode, flag: FI_PIN_FILE)) |
3124 | fsflags |= FS_NOCOW_FL; |
3125 | |
3126 | fileattr_fill_flags(fa, flags: fsflags & F2FS_GETTABLE_FS_FL); |
3127 | |
3128 | if (f2fs_sb_has_project_quota(sbi: F2FS_I_SB(inode))) |
3129 | fa->fsx_projid = from_kprojid(to: &init_user_ns, projid: fi->i_projid); |
3130 | |
3131 | return 0; |
3132 | } |
3133 | |
3134 | int f2fs_fileattr_set(struct mnt_idmap *idmap, |
3135 | struct dentry *dentry, struct fileattr *fa) |
3136 | { |
3137 | struct inode *inode = d_inode(dentry); |
3138 | u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL; |
3139 | u32 iflags; |
3140 | int err; |
3141 | |
3142 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) |
3143 | return -EIO; |
3144 | if (!f2fs_is_checkpoint_ready(sbi: F2FS_I_SB(inode))) |
3145 | return -ENOSPC; |
3146 | if (fsflags & ~F2FS_GETTABLE_FS_FL) |
3147 | return -EOPNOTSUPP; |
3148 | fsflags &= F2FS_SETTABLE_FS_FL; |
3149 | if (!fa->flags_valid) |
3150 | mask &= FS_COMMON_FL; |
3151 | |
3152 | iflags = f2fs_fsflags_to_iflags(fsflags); |
3153 | if (f2fs_mask_flags(mode: inode->i_mode, flags: iflags) != iflags) |
3154 | return -EOPNOTSUPP; |
3155 | |
3156 | err = f2fs_setflags_common(inode, iflags, mask: f2fs_fsflags_to_iflags(fsflags: mask)); |
3157 | if (!err) |
3158 | err = f2fs_ioc_setproject(inode, projid: fa->fsx_projid); |
3159 | |
3160 | return err; |
3161 | } |
3162 | |
3163 | int f2fs_pin_file_control(struct inode *inode, bool inc) |
3164 | { |
3165 | struct f2fs_inode_info *fi = F2FS_I(inode); |
3166 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3167 | |
/* Use i_gc_failures for a normal file as a risk signal. */
3169 | if (inc) |
3170 | f2fs_i_gc_failures_write(inode, |
3171 | count: fi->i_gc_failures[GC_FAILURE_PIN] + 1); |
3172 | |
3173 | if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { |
3174 | f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials" , |
3175 | __func__, inode->i_ino, |
3176 | fi->i_gc_failures[GC_FAILURE_PIN]); |
3177 | clear_inode_flag(inode, flag: FI_PIN_FILE); |
3178 | return -EAGAIN; |
3179 | } |
3180 | return 0; |
3181 | } |
3182 | |
3183 | static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) |
3184 | { |
3185 | struct inode *inode = file_inode(f: filp); |
3186 | __u32 pin; |
3187 | int ret = 0; |
3188 | |
3189 | if (get_user(pin, (__u32 __user *)arg)) |
3190 | return -EFAULT; |
3191 | |
3192 | if (!S_ISREG(inode->i_mode)) |
3193 | return -EINVAL; |
3194 | |
3195 | if (f2fs_readonly(sb: F2FS_I_SB(inode)->sb)) |
3196 | return -EROFS; |
3197 | |
3198 | ret = mnt_want_write_file(file: filp); |
3199 | if (ret) |
3200 | return ret; |
3201 | |
3202 | inode_lock(inode); |
3203 | |
3204 | if (!pin) { |
3205 | clear_inode_flag(inode, flag: FI_PIN_FILE); |
3206 | f2fs_i_gc_failures_write(inode, count: 0); |
3207 | goto done; |
3208 | } |
3209 | |
3210 | if (f2fs_should_update_outplace(inode, NULL)) { |
3211 | ret = -EINVAL; |
3212 | goto out; |
3213 | } |
3214 | |
3215 | if (f2fs_pin_file_control(inode, inc: false)) { |
3216 | ret = -EAGAIN; |
3217 | goto out; |
3218 | } |
3219 | |
3220 | ret = f2fs_convert_inline_inode(inode); |
3221 | if (ret) |
3222 | goto out; |
3223 | |
3224 | if (!f2fs_disable_compressed_file(inode)) { |
3225 | ret = -EOPNOTSUPP; |
3226 | goto out; |
3227 | } |
3228 | |
3229 | set_inode_flag(inode, flag: FI_PIN_FILE); |
3230 | ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; |
3231 | done: |
3232 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
3233 | out: |
3234 | inode_unlock(inode); |
3235 | mnt_drop_write_file(file: filp); |
3236 | return ret; |
3237 | } |
3238 | |
3239 | static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg) |
3240 | { |
3241 | struct inode *inode = file_inode(f: filp); |
3242 | __u32 pin = 0; |
3243 | |
3244 | if (is_inode_flag_set(inode, flag: FI_PIN_FILE)) |
3245 | pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; |
3246 | return put_user(pin, (u32 __user *)arg); |
3247 | } |
3248 | |
3249 | int f2fs_precache_extents(struct inode *inode) |
3250 | { |
3251 | struct f2fs_inode_info *fi = F2FS_I(inode); |
3252 | struct f2fs_map_blocks map; |
3253 | pgoff_t m_next_extent; |
3254 | loff_t end; |
3255 | int err; |
3256 | |
3257 | if (is_inode_flag_set(inode, flag: FI_NO_EXTENT)) |
3258 | return -EOPNOTSUPP; |
3259 | |
3260 | map.m_lblk = 0; |
3261 | map.m_pblk = 0; |
3262 | map.m_next_pgofs = NULL; |
3263 | map.m_next_extent = &m_next_extent; |
3264 | map.m_seg_type = NO_CHECK_TYPE; |
3265 | map.m_may_create = false; |
3266 | end = F2FS_BLK_ALIGN(i_size_read(inode)); |
3267 | |
3268 | while (map.m_lblk < end) { |
3269 | map.m_len = end - map.m_lblk; |
3270 | |
3271 | f2fs_down_write(sem: &fi->i_gc_rwsem[WRITE]); |
3272 | err = f2fs_map_blocks(inode, map: &map, flag: F2FS_GET_BLOCK_PRECACHE); |
3273 | f2fs_up_write(sem: &fi->i_gc_rwsem[WRITE]); |
3274 | if (err || !map.m_len) |
3275 | return err; |
3276 | |
3277 | map.m_lblk = m_next_extent; |
3278 | } |
3279 | |
3280 | return 0; |
3281 | } |
3282 | |
3283 | static int f2fs_ioc_precache_extents(struct file *filp) |
3284 | { |
3285 | return f2fs_precache_extents(inode: file_inode(f: filp)); |
3286 | } |
3287 | |
3288 | static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) |
3289 | { |
3290 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: file_inode(f: filp)); |
3291 | __u64 block_count; |
3292 | |
3293 | if (!capable(CAP_SYS_ADMIN)) |
3294 | return -EPERM; |
3295 | |
3296 | if (f2fs_readonly(sb: sbi->sb)) |
3297 | return -EROFS; |
3298 | |
3299 | if (copy_from_user(to: &block_count, from: (void __user *)arg, |
3300 | n: sizeof(block_count))) |
3301 | return -EFAULT; |
3302 | |
3303 | return f2fs_resize_fs(filp, block_count); |
3304 | } |
3305 | |
3306 | static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) |
3307 | { |
3308 | struct inode *inode = file_inode(f: filp); |
3309 | |
3310 | f2fs_update_time(sbi: F2FS_I_SB(inode), type: REQ_TIME); |
3311 | |
3312 | if (!f2fs_sb_has_verity(sbi: F2FS_I_SB(inode))) { |
3313 | f2fs_warn(F2FS_I_SB(inode), |
3314 | "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem" , |
3315 | inode->i_ino); |
3316 | return -EOPNOTSUPP; |
3317 | } |
3318 | |
3319 | return fsverity_ioctl_enable(filp, arg: (const void __user *)arg); |
3320 | } |
3321 | |
3322 | static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) |
3323 | { |
3324 | if (!f2fs_sb_has_verity(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
3325 | return -EOPNOTSUPP; |
3326 | |
3327 | return fsverity_ioctl_measure(filp, arg: (void __user *)arg); |
3328 | } |
3329 | |
3330 | static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg) |
3331 | { |
3332 | if (!f2fs_sb_has_verity(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
3333 | return -EOPNOTSUPP; |
3334 | |
3335 | return fsverity_ioctl_read_metadata(filp, uarg: (const void __user *)arg); |
3336 | } |
3337 | |
3338 | static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) |
3339 | { |
3340 | struct inode *inode = file_inode(f: filp); |
3341 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3342 | char *vbuf; |
3343 | int count; |
3344 | int err = 0; |
3345 | |
3346 | vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); |
3347 | if (!vbuf) |
3348 | return -ENOMEM; |
3349 | |
3350 | f2fs_down_read(sem: &sbi->sb_lock); |
3351 | count = utf16s_to_utf8s(pwcs: sbi->raw_super->volume_name, |
3352 | ARRAY_SIZE(sbi->raw_super->volume_name), |
3353 | endian: UTF16_LITTLE_ENDIAN, s: vbuf, MAX_VOLUME_NAME); |
3354 | f2fs_up_read(sem: &sbi->sb_lock); |
3355 | |
3356 | if (copy_to_user(to: (char __user *)arg, from: vbuf, |
3357 | min(FSLABEL_MAX, count))) |
3358 | err = -EFAULT; |
3359 | |
3360 | kfree(objp: vbuf); |
3361 | return err; |
3362 | } |
3363 | |
3364 | static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) |
3365 | { |
3366 | struct inode *inode = file_inode(f: filp); |
3367 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3368 | char *vbuf; |
3369 | int err = 0; |
3370 | |
3371 | if (!capable(CAP_SYS_ADMIN)) |
3372 | return -EPERM; |
3373 | |
3374 | vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); |
3375 | if (IS_ERR(ptr: vbuf)) |
3376 | return PTR_ERR(ptr: vbuf); |
3377 | |
3378 | err = mnt_want_write_file(file: filp); |
3379 | if (err) |
3380 | goto out; |
3381 | |
3382 | f2fs_down_write(sem: &sbi->sb_lock); |
3383 | |
3384 | memset(sbi->raw_super->volume_name, 0, |
3385 | sizeof(sbi->raw_super->volume_name)); |
3386 | utf8s_to_utf16s(s: vbuf, strlen(vbuf), endian: UTF16_LITTLE_ENDIAN, |
3387 | pwcs: sbi->raw_super->volume_name, |
3388 | ARRAY_SIZE(sbi->raw_super->volume_name)); |
3389 | |
3390 | err = f2fs_commit_super(sbi, recover: false); |
3391 | |
3392 | f2fs_up_write(sem: &sbi->sb_lock); |
3393 | |
3394 | mnt_drop_write_file(file: filp); |
3395 | out: |
3396 | kfree(objp: vbuf); |
3397 | return err; |
3398 | } |
3399 | |
3400 | static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks) |
3401 | { |
3402 | if (!f2fs_sb_has_compression(sbi: F2FS_I_SB(inode))) |
3403 | return -EOPNOTSUPP; |
3404 | |
3405 | if (!f2fs_compressed_file(inode)) |
3406 | return -EINVAL; |
3407 | |
3408 | *blocks = atomic_read(v: &F2FS_I(inode)->i_compr_blocks); |
3409 | |
3410 | return 0; |
3411 | } |
3412 | |
3413 | static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg) |
3414 | { |
3415 | struct inode *inode = file_inode(f: filp); |
3416 | __u64 blocks; |
3417 | int ret; |
3418 | |
3419 | ret = f2fs_get_compress_blocks(inode, blocks: &blocks); |
3420 | if (ret < 0) |
3421 | return ret; |
3422 | |
3423 | return put_user(blocks, (u64 __user *)arg); |
3424 | } |
3425 | |
3426 | static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) |
3427 | { |
3428 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: dn->inode); |
3429 | unsigned int released_blocks = 0; |
3430 | int cluster_size = F2FS_I(inode: dn->inode)->i_cluster_size; |
3431 | block_t blkaddr; |
3432 | int i; |
3433 | |
3434 | for (i = 0; i < count; i++) { |
3435 | blkaddr = data_blkaddr(inode: dn->inode, node_page: dn->node_page, |
3436 | offset: dn->ofs_in_node + i); |
3437 | |
3438 | if (!__is_valid_data_blkaddr(blkaddr)) |
3439 | continue; |
3440 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, |
3441 | DATA_GENERIC_ENHANCE))) { |
3442 | f2fs_handle_error(sbi, error: ERROR_INVALID_BLKADDR); |
3443 | return -EFSCORRUPTED; |
3444 | } |
3445 | } |
3446 | |
3447 | while (count) { |
3448 | int compr_blocks = 0; |
3449 | |
3450 | for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { |
3451 | blkaddr = f2fs_data_blkaddr(dn); |
3452 | |
3453 | if (i == 0) { |
3454 | if (blkaddr == COMPRESS_ADDR) |
3455 | continue; |
3456 | dn->ofs_in_node += cluster_size; |
3457 | goto next; |
3458 | } |
3459 | |
3460 | if (__is_valid_data_blkaddr(blkaddr)) |
3461 | compr_blocks++; |
3462 | |
3463 | if (blkaddr != NEW_ADDR) |
3464 | continue; |
3465 | |
3466 | dn->data_blkaddr = NULL_ADDR; |
3467 | f2fs_set_data_blkaddr(dn); |
3468 | } |
3469 | |
3470 | f2fs_i_compr_blocks_update(inode: dn->inode, blocks: compr_blocks, add: false); |
3471 | dec_valid_block_count(sbi, inode: dn->inode, |
3472 | count: cluster_size - compr_blocks); |
3473 | |
3474 | released_blocks += cluster_size - compr_blocks; |
3475 | next: |
3476 | count -= cluster_size; |
3477 | } |
3478 | |
3479 | return released_blocks; |
3480 | } |
3481 | |
3482 | static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) |
3483 | { |
3484 | struct inode *inode = file_inode(f: filp); |
3485 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3486 | pgoff_t page_idx = 0, last_idx; |
3487 | unsigned int released_blocks = 0; |
3488 | int ret; |
3489 | int writecount; |
3490 | |
3491 | if (!f2fs_sb_has_compression(sbi)) |
3492 | return -EOPNOTSUPP; |
3493 | |
3494 | if (!f2fs_compressed_file(inode)) |
3495 | return -EINVAL; |
3496 | |
3497 | if (f2fs_readonly(sb: sbi->sb)) |
3498 | return -EROFS; |
3499 | |
3500 | ret = mnt_want_write_file(file: filp); |
3501 | if (ret) |
3502 | return ret; |
3503 | |
3504 | f2fs_balance_fs(sbi, need: true); |
3505 | |
3506 | inode_lock(inode); |
3507 | |
3508 | writecount = atomic_read(v: &inode->i_writecount); |
3509 | if ((filp->f_mode & FMODE_WRITE && writecount != 1) || |
3510 | (!(filp->f_mode & FMODE_WRITE) && writecount)) { |
3511 | ret = -EBUSY; |
3512 | goto out; |
3513 | } |
3514 | |
3515 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) { |
3516 | ret = -EINVAL; |
3517 | goto out; |
3518 | } |
3519 | |
3520 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, LLONG_MAX); |
3521 | if (ret) |
3522 | goto out; |
3523 | |
3524 | if (!atomic_read(v: &F2FS_I(inode)->i_compr_blocks)) { |
3525 | ret = -EPERM; |
3526 | goto out; |
3527 | } |
3528 | |
3529 | set_inode_flag(inode, flag: FI_COMPRESS_RELEASED); |
3530 | inode_set_ctime_current(inode); |
3531 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
3532 | |
3533 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3534 | filemap_invalidate_lock(mapping: inode->i_mapping); |
3535 | |
3536 | last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
3537 | |
3538 | while (page_idx < last_idx) { |
3539 | struct dnode_of_data dn; |
3540 | pgoff_t end_offset, count; |
3541 | |
3542 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
3543 | ret = f2fs_get_dnode_of_data(dn: &dn, index: page_idx, mode: LOOKUP_NODE); |
3544 | if (ret) { |
3545 | if (ret == -ENOENT) { |
3546 | page_idx = f2fs_get_next_page_offset(dn: &dn, |
3547 | pgofs: page_idx); |
3548 | ret = 0; |
3549 | continue; |
3550 | } |
3551 | break; |
3552 | } |
3553 | |
3554 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
3555 | count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); |
3556 | count = round_up(count, F2FS_I(inode)->i_cluster_size); |
3557 | |
3558 | ret = release_compress_blocks(dn: &dn, count); |
3559 | |
3560 | f2fs_put_dnode(dn: &dn); |
3561 | |
3562 | if (ret < 0) |
3563 | break; |
3564 | |
3565 | page_idx += count; |
3566 | released_blocks += ret; |
3567 | } |
3568 | |
3569 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
3570 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3571 | out: |
3572 | inode_unlock(inode); |
3573 | |
3574 | mnt_drop_write_file(file: filp); |
3575 | |
3576 | if (ret >= 0) { |
3577 | ret = put_user(released_blocks, (u64 __user *)arg); |
3578 | } else if (released_blocks && |
3579 | atomic_read(v: &F2FS_I(inode)->i_compr_blocks)) { |
3580 | set_sbi_flag(sbi, type: SBI_NEED_FSCK); |
3581 | f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " |
3582 | "iblocks=%llu, released=%u, compr_blocks=%u, " |
3583 | "run fsck to fix." , |
3584 | __func__, inode->i_ino, inode->i_blocks, |
3585 | released_blocks, |
3586 | atomic_read(&F2FS_I(inode)->i_compr_blocks)); |
3587 | } |
3588 | |
3589 | return ret; |
3590 | } |
3591 | |
3592 | static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) |
3593 | { |
3594 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: dn->inode); |
3595 | unsigned int reserved_blocks = 0; |
3596 | int cluster_size = F2FS_I(inode: dn->inode)->i_cluster_size; |
3597 | block_t blkaddr; |
3598 | int i; |
3599 | |
3600 | for (i = 0; i < count; i++) { |
3601 | blkaddr = data_blkaddr(inode: dn->inode, node_page: dn->node_page, |
3602 | offset: dn->ofs_in_node + i); |
3603 | |
3604 | if (!__is_valid_data_blkaddr(blkaddr)) |
3605 | continue; |
3606 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, |
3607 | DATA_GENERIC_ENHANCE))) { |
3608 | f2fs_handle_error(sbi, error: ERROR_INVALID_BLKADDR); |
3609 | return -EFSCORRUPTED; |
3610 | } |
3611 | } |
3612 | |
3613 | while (count) { |
3614 | int compr_blocks = 0; |
3615 | blkcnt_t reserved; |
3616 | int ret; |
3617 | |
3618 | for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { |
3619 | blkaddr = f2fs_data_blkaddr(dn); |
3620 | |
3621 | if (i == 0) { |
3622 | if (blkaddr == COMPRESS_ADDR) |
3623 | continue; |
3624 | dn->ofs_in_node += cluster_size; |
3625 | goto next; |
3626 | } |
3627 | |
3628 | if (__is_valid_data_blkaddr(blkaddr)) { |
3629 | compr_blocks++; |
3630 | continue; |
3631 | } |
3632 | |
3633 | dn->data_blkaddr = NEW_ADDR; |
3634 | f2fs_set_data_blkaddr(dn); |
3635 | } |
3636 | |
3637 | reserved = cluster_size - compr_blocks; |
3638 | ret = inc_valid_block_count(sbi, inode: dn->inode, count: &reserved); |
3639 | if (ret) |
3640 | return ret; |
3641 | |
3642 | if (reserved != cluster_size - compr_blocks) |
3643 | return -ENOSPC; |
3644 | |
3645 | f2fs_i_compr_blocks_update(inode: dn->inode, blocks: compr_blocks, add: true); |
3646 | |
3647 | reserved_blocks += reserved; |
3648 | next: |
3649 | count -= cluster_size; |
3650 | } |
3651 | |
3652 | return reserved_blocks; |
3653 | } |
3654 | |
3655 | static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) |
3656 | { |
3657 | struct inode *inode = file_inode(f: filp); |
3658 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3659 | pgoff_t page_idx = 0, last_idx; |
3660 | unsigned int reserved_blocks = 0; |
3661 | int ret; |
3662 | |
3663 | if (!f2fs_sb_has_compression(sbi)) |
3664 | return -EOPNOTSUPP; |
3665 | |
3666 | if (!f2fs_compressed_file(inode)) |
3667 | return -EINVAL; |
3668 | |
3669 | if (f2fs_readonly(sb: sbi->sb)) |
3670 | return -EROFS; |
3671 | |
3672 | ret = mnt_want_write_file(file: filp); |
3673 | if (ret) |
3674 | return ret; |
3675 | |
3676 | if (atomic_read(v: &F2FS_I(inode)->i_compr_blocks)) |
3677 | goto out; |
3678 | |
3679 | f2fs_balance_fs(sbi, need: true); |
3680 | |
3681 | inode_lock(inode); |
3682 | |
3683 | if (!is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) { |
3684 | ret = -EINVAL; |
3685 | goto unlock_inode; |
3686 | } |
3687 | |
3688 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3689 | filemap_invalidate_lock(mapping: inode->i_mapping); |
3690 | |
3691 | last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
3692 | |
3693 | while (page_idx < last_idx) { |
3694 | struct dnode_of_data dn; |
3695 | pgoff_t end_offset, count; |
3696 | |
3697 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
3698 | ret = f2fs_get_dnode_of_data(dn: &dn, index: page_idx, mode: LOOKUP_NODE); |
3699 | if (ret) { |
3700 | if (ret == -ENOENT) { |
3701 | page_idx = f2fs_get_next_page_offset(dn: &dn, |
3702 | pgofs: page_idx); |
3703 | ret = 0; |
3704 | continue; |
3705 | } |
3706 | break; |
3707 | } |
3708 | |
3709 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
3710 | count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); |
3711 | count = round_up(count, F2FS_I(inode)->i_cluster_size); |
3712 | |
3713 | ret = reserve_compress_blocks(dn: &dn, count); |
3714 | |
3715 | f2fs_put_dnode(dn: &dn); |
3716 | |
3717 | if (ret < 0) |
3718 | break; |
3719 | |
3720 | page_idx += count; |
3721 | reserved_blocks += ret; |
3722 | } |
3723 | |
3724 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
3725 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3726 | |
3727 | if (ret >= 0) { |
3728 | clear_inode_flag(inode, flag: FI_COMPRESS_RELEASED); |
3729 | inode_set_ctime_current(inode); |
3730 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
3731 | } |
3732 | unlock_inode: |
3733 | inode_unlock(inode); |
3734 | out: |
3735 | mnt_drop_write_file(file: filp); |
3736 | |
3737 | if (ret >= 0) { |
3738 | ret = put_user(reserved_blocks, (u64 __user *)arg); |
3739 | } else if (reserved_blocks && |
3740 | atomic_read(v: &F2FS_I(inode)->i_compr_blocks)) { |
3741 | set_sbi_flag(sbi, type: SBI_NEED_FSCK); |
3742 | f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " |
3743 | "iblocks=%llu, reserved=%u, compr_blocks=%u, " |
3744 | "run fsck to fix." , |
3745 | __func__, inode->i_ino, inode->i_blocks, |
3746 | reserved_blocks, |
3747 | atomic_read(&F2FS_I(inode)->i_compr_blocks)); |
3748 | } |
3749 | |
3750 | return ret; |
3751 | } |
3752 | |
3753 | static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, |
3754 | pgoff_t off, block_t block, block_t len, u32 flags) |
3755 | { |
3756 | sector_t sector = SECTOR_FROM_BLOCK(block); |
3757 | sector_t nr_sects = SECTOR_FROM_BLOCK(len); |
3758 | int ret = 0; |
3759 | |
3760 | if (flags & F2FS_TRIM_FILE_DISCARD) { |
3761 | if (bdev_max_secure_erase_sectors(bdev)) |
3762 | ret = blkdev_issue_secure_erase(bdev, sector, nr_sects, |
3763 | GFP_NOFS); |
3764 | else |
3765 | ret = blkdev_issue_discard(bdev, sector, nr_sects, |
3766 | GFP_NOFS); |
3767 | } |
3768 | |
3769 | if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { |
3770 | if (IS_ENCRYPTED(inode)) |
3771 | ret = fscrypt_zeroout_range(inode, lblk: off, pblk: block, len); |
3772 | else |
3773 | ret = blkdev_issue_zeroout(bdev, sector, nr_sects, |
3774 | GFP_NOFS, flags: 0); |
3775 | } |
3776 | |
3777 | return ret; |
3778 | } |
3779 | |
3780 | static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) |
3781 | { |
3782 | struct inode *inode = file_inode(f: filp); |
3783 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3784 | struct address_space *mapping = inode->i_mapping; |
3785 | struct block_device *prev_bdev = NULL; |
3786 | struct f2fs_sectrim_range range; |
3787 | pgoff_t index, pg_end, prev_index = 0; |
3788 | block_t prev_block = 0, len = 0; |
3789 | loff_t end_addr; |
3790 | bool to_end = false; |
3791 | int ret = 0; |
3792 | |
3793 | if (!(filp->f_mode & FMODE_WRITE)) |
3794 | return -EBADF; |
3795 | |
3796 | if (copy_from_user(to: &range, from: (struct f2fs_sectrim_range __user *)arg, |
3797 | n: sizeof(range))) |
3798 | return -EFAULT; |
3799 | |
3800 | if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || |
3801 | !S_ISREG(inode->i_mode)) |
3802 | return -EINVAL; |
3803 | |
3804 | if (((range.flags & F2FS_TRIM_FILE_DISCARD) && |
3805 | !f2fs_hw_support_discard(sbi)) || |
3806 | ((range.flags & F2FS_TRIM_FILE_ZEROOUT) && |
3807 | IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) |
3808 | return -EOPNOTSUPP; |
3809 | |
3810 | file_start_write(file: filp); |
3811 | inode_lock(inode); |
3812 | |
3813 | if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || |
3814 | range.start >= inode->i_size) { |
3815 | ret = -EINVAL; |
3816 | goto err; |
3817 | } |
3818 | |
3819 | if (range.len == 0) |
3820 | goto err; |
3821 | |
3822 | if (inode->i_size - range.start > range.len) { |
3823 | end_addr = range.start + range.len; |
3824 | } else { |
3825 | end_addr = range.len == (u64)-1 ? |
3826 | sbi->sb->s_maxbytes : inode->i_size; |
3827 | to_end = true; |
3828 | } |
3829 | |
3830 | if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || |
3831 | (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { |
3832 | ret = -EINVAL; |
3833 | goto err; |
3834 | } |
3835 | |
3836 | index = F2FS_BYTES_TO_BLK(range.start); |
3837 | pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); |
3838 | |
3839 | ret = f2fs_convert_inline_inode(inode); |
3840 | if (ret) |
3841 | goto err; |
3842 | |
3843 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3844 | filemap_invalidate_lock(mapping); |
3845 | |
3846 | ret = filemap_write_and_wait_range(mapping, lstart: range.start, |
3847 | lend: to_end ? LLONG_MAX : end_addr - 1); |
3848 | if (ret) |
3849 | goto out; |
3850 | |
3851 | truncate_inode_pages_range(mapping, lstart: range.start, |
3852 | lend: to_end ? -1 : end_addr - 1); |
3853 | |
3854 | while (index < pg_end) { |
3855 | struct dnode_of_data dn; |
3856 | pgoff_t end_offset, count; |
3857 | int i; |
3858 | |
3859 | set_new_dnode(dn: &dn, inode, NULL, NULL, nid: 0); |
3860 | ret = f2fs_get_dnode_of_data(dn: &dn, index, mode: LOOKUP_NODE); |
3861 | if (ret) { |
3862 | if (ret == -ENOENT) { |
3863 | index = f2fs_get_next_page_offset(dn: &dn, pgofs: index); |
3864 | continue; |
3865 | } |
3866 | goto out; |
3867 | } |
3868 | |
3869 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); |
3870 | count = min(end_offset - dn.ofs_in_node, pg_end - index); |
3871 | for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { |
3872 | struct block_device *cur_bdev; |
3873 | block_t blkaddr = f2fs_data_blkaddr(dn: &dn); |
3874 | |
3875 | if (!__is_valid_data_blkaddr(blkaddr)) |
3876 | continue; |
3877 | |
3878 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr, |
3879 | type: DATA_GENERIC_ENHANCE)) { |
3880 | ret = -EFSCORRUPTED; |
3881 | f2fs_put_dnode(dn: &dn); |
3882 | f2fs_handle_error(sbi, |
3883 | error: ERROR_INVALID_BLKADDR); |
3884 | goto out; |
3885 | } |
3886 | |
3887 | cur_bdev = f2fs_target_device(sbi, blk_addr: blkaddr, NULL); |
3888 | if (f2fs_is_multi_device(sbi)) { |
3889 | int di = f2fs_target_device_index(sbi, blkaddr); |
3890 | |
3891 | blkaddr -= FDEV(di).start_blk; |
3892 | } |
3893 | |
3894 | if (len) { |
3895 | if (prev_bdev == cur_bdev && |
3896 | index == prev_index + len && |
3897 | blkaddr == prev_block + len) { |
3898 | len++; |
3899 | } else { |
3900 | ret = f2fs_secure_erase(bdev: prev_bdev, |
3901 | inode, off: prev_index, block: prev_block, |
3902 | len, flags: range.flags); |
3903 | if (ret) { |
3904 | f2fs_put_dnode(dn: &dn); |
3905 | goto out; |
3906 | } |
3907 | |
3908 | len = 0; |
3909 | } |
3910 | } |
3911 | |
3912 | if (!len) { |
3913 | prev_bdev = cur_bdev; |
3914 | prev_index = index; |
3915 | prev_block = blkaddr; |
3916 | len = 1; |
3917 | } |
3918 | } |
3919 | |
3920 | f2fs_put_dnode(dn: &dn); |
3921 | |
3922 | if (fatal_signal_pending(current)) { |
3923 | ret = -EINTR; |
3924 | goto out; |
3925 | } |
3926 | cond_resched(); |
3927 | } |
3928 | |
3929 | if (len) |
3930 | ret = f2fs_secure_erase(bdev: prev_bdev, inode, off: prev_index, |
3931 | block: prev_block, len, flags: range.flags); |
3932 | out: |
3933 | filemap_invalidate_unlock(mapping); |
3934 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
3935 | err: |
3936 | inode_unlock(inode); |
3937 | file_end_write(file: filp); |
3938 | |
3939 | return ret; |
3940 | } |
3941 | |
3942 | static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg) |
3943 | { |
3944 | struct inode *inode = file_inode(f: filp); |
3945 | struct f2fs_comp_option option; |
3946 | |
3947 | if (!f2fs_sb_has_compression(sbi: F2FS_I_SB(inode))) |
3948 | return -EOPNOTSUPP; |
3949 | |
3950 | inode_lock_shared(inode); |
3951 | |
3952 | if (!f2fs_compressed_file(inode)) { |
3953 | inode_unlock_shared(inode); |
3954 | return -ENODATA; |
3955 | } |
3956 | |
3957 | option.algorithm = F2FS_I(inode)->i_compress_algorithm; |
3958 | option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size; |
3959 | |
3960 | inode_unlock_shared(inode); |
3961 | |
3962 | if (copy_to_user(to: (struct f2fs_comp_option __user *)arg, from: &option, |
3963 | n: sizeof(option))) |
3964 | return -EFAULT; |
3965 | |
3966 | return 0; |
3967 | } |
3968 | |
3969 | static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg) |
3970 | { |
3971 | struct inode *inode = file_inode(f: filp); |
3972 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
3973 | struct f2fs_comp_option option; |
3974 | int ret = 0; |
3975 | |
3976 | if (!f2fs_sb_has_compression(sbi)) |
3977 | return -EOPNOTSUPP; |
3978 | |
3979 | if (!(filp->f_mode & FMODE_WRITE)) |
3980 | return -EBADF; |
3981 | |
3982 | if (copy_from_user(to: &option, from: (struct f2fs_comp_option __user *)arg, |
3983 | n: sizeof(option))) |
3984 | return -EFAULT; |
3985 | |
3986 | if (!f2fs_compressed_file(inode) || |
3987 | option.log_cluster_size < MIN_COMPRESS_LOG_SIZE || |
3988 | option.log_cluster_size > MAX_COMPRESS_LOG_SIZE || |
3989 | option.algorithm >= COMPRESS_MAX) |
3990 | return -EINVAL; |
3991 | |
3992 | file_start_write(file: filp); |
3993 | inode_lock(inode); |
3994 | |
3995 | f2fs_down_write(sem: &F2FS_I(inode)->i_sem); |
3996 | if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) { |
3997 | ret = -EBUSY; |
3998 | goto out; |
3999 | } |
4000 | |
4001 | if (F2FS_HAS_BLOCKS(inode)) { |
4002 | ret = -EFBIG; |
4003 | goto out; |
4004 | } |
4005 | |
4006 | F2FS_I(inode)->i_compress_algorithm = option.algorithm; |
4007 | F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size; |
4008 | F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size); |
4009 | /* Set default level */ |
4010 | if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) |
4011 | F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; |
4012 | else |
4013 | F2FS_I(inode)->i_compress_level = 0; |
4014 | /* Adjust mount option level */ |
4015 | if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm && |
4016 | F2FS_OPTION(sbi).compress_level) |
4017 | F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level; |
4018 | f2fs_mark_inode_dirty_sync(inode, sync: true); |
4019 | |
4020 | if (!f2fs_is_compress_backend_ready(inode)) |
4021 | f2fs_warn(sbi, "compression algorithm is successfully set, " |
4022 | "but current kernel doesn't support this algorithm." ); |
4023 | out: |
4024 | f2fs_up_write(sem: &F2FS_I(inode)->i_sem); |
4025 | inode_unlock(inode); |
4026 | file_end_write(file: filp); |
4027 | |
4028 | return ret; |
4029 | } |
4030 | |
4031 | static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) |
4032 | { |
4033 | DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx); |
4034 | struct address_space *mapping = inode->i_mapping; |
4035 | struct page *page; |
4036 | pgoff_t redirty_idx = page_idx; |
4037 | int i, page_len = 0, ret = 0; |
4038 | |
4039 | page_cache_ra_unbounded(&ractl, nr_to_read: len, lookahead_count: 0); |
4040 | |
4041 | for (i = 0; i < len; i++, page_idx++) { |
4042 | page = read_cache_page(mapping, index: page_idx, NULL, NULL); |
4043 | if (IS_ERR(ptr: page)) { |
4044 | ret = PTR_ERR(ptr: page); |
4045 | break; |
4046 | } |
4047 | page_len++; |
4048 | } |
4049 | |
4050 | for (i = 0; i < page_len; i++, redirty_idx++) { |
4051 | page = find_lock_page(mapping, index: redirty_idx); |
4052 | |
/* It will never fail, since the page was pinned above */
4054 | f2fs_bug_on(F2FS_I_SB(inode), !page); |
4055 | |
4056 | set_page_dirty(page); |
4057 | f2fs_put_page(page, unlock: 1); |
4058 | f2fs_put_page(page, unlock: 0); |
4059 | } |
4060 | |
4061 | return ret; |
4062 | } |
4063 | |
4064 | static int f2fs_ioc_decompress_file(struct file *filp) |
4065 | { |
4066 | struct inode *inode = file_inode(f: filp); |
4067 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
4068 | struct f2fs_inode_info *fi = F2FS_I(inode); |
4069 | pgoff_t page_idx = 0, last_idx; |
4070 | unsigned int blk_per_seg = sbi->blocks_per_seg; |
4071 | int cluster_size = fi->i_cluster_size; |
4072 | int count, ret; |
4073 | |
4074 | if (!f2fs_sb_has_compression(sbi) || |
4075 | F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) |
4076 | return -EOPNOTSUPP; |
4077 | |
4078 | if (!(filp->f_mode & FMODE_WRITE)) |
4079 | return -EBADF; |
4080 | |
4081 | if (!f2fs_compressed_file(inode)) |
4082 | return -EINVAL; |
4083 | |
4084 | f2fs_balance_fs(sbi, need: true); |
4085 | |
4086 | file_start_write(file: filp); |
4087 | inode_lock(inode); |
4088 | |
4089 | if (!f2fs_is_compress_backend_ready(inode)) { |
4090 | ret = -EOPNOTSUPP; |
4091 | goto out; |
4092 | } |
4093 | |
4094 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) { |
4095 | ret = -EINVAL; |
4096 | goto out; |
4097 | } |
4098 | |
4099 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, LLONG_MAX); |
4100 | if (ret) |
4101 | goto out; |
4102 | |
4103 | if (!atomic_read(v: &fi->i_compr_blocks)) |
4104 | goto out; |
4105 | |
4106 | last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
4107 | |
4108 | count = last_idx - page_idx; |
4109 | while (count && count >= cluster_size) { |
4110 | ret = redirty_blocks(inode, page_idx, len: cluster_size); |
4111 | if (ret < 0) |
4112 | break; |
4113 | |
4114 | if (get_dirty_pages(inode) >= blk_per_seg) { |
4115 | ret = filemap_fdatawrite(inode->i_mapping); |
4116 | if (ret < 0) |
4117 | break; |
4118 | } |
4119 | |
4120 | count -= cluster_size; |
4121 | page_idx += cluster_size; |
4122 | |
4123 | cond_resched(); |
4124 | if (fatal_signal_pending(current)) { |
4125 | ret = -EINTR; |
4126 | break; |
4127 | } |
4128 | } |
4129 | |
4130 | if (!ret) |
4131 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, |
4132 | LLONG_MAX); |
4133 | |
4134 | if (ret) |
4135 | f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file." , |
4136 | __func__, ret); |
4137 | out: |
4138 | inode_unlock(inode); |
4139 | file_end_write(file: filp); |
4140 | |
4141 | return ret; |
4142 | } |
4143 | |
4144 | static int f2fs_ioc_compress_file(struct file *filp) |
4145 | { |
4146 | struct inode *inode = file_inode(f: filp); |
4147 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
4148 | pgoff_t page_idx = 0, last_idx; |
4149 | unsigned int blk_per_seg = sbi->blocks_per_seg; |
4150 | int cluster_size = F2FS_I(inode)->i_cluster_size; |
4151 | int count, ret; |
4152 | |
4153 | if (!f2fs_sb_has_compression(sbi) || |
4154 | F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) |
4155 | return -EOPNOTSUPP; |
4156 | |
4157 | if (!(filp->f_mode & FMODE_WRITE)) |
4158 | return -EBADF; |
4159 | |
4160 | if (!f2fs_compressed_file(inode)) |
4161 | return -EINVAL; |
4162 | |
4163 | f2fs_balance_fs(sbi, need: true); |
4164 | |
4165 | file_start_write(file: filp); |
4166 | inode_lock(inode); |
4167 | |
4168 | if (!f2fs_is_compress_backend_ready(inode)) { |
4169 | ret = -EOPNOTSUPP; |
4170 | goto out; |
4171 | } |
4172 | |
4173 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) { |
4174 | ret = -EINVAL; |
4175 | goto out; |
4176 | } |
4177 | |
4178 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, LLONG_MAX); |
4179 | if (ret) |
4180 | goto out; |
4181 | |
4182 | set_inode_flag(inode, flag: FI_ENABLE_COMPRESS); |
4183 | |
4184 | last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
4185 | |
4186 | count = last_idx - page_idx; |
4187 | while (count && count >= cluster_size) { |
4188 | ret = redirty_blocks(inode, page_idx, len: cluster_size); |
4189 | if (ret < 0) |
4190 | break; |
4191 | |
4192 | if (get_dirty_pages(inode) >= blk_per_seg) { |
4193 | ret = filemap_fdatawrite(inode->i_mapping); |
4194 | if (ret < 0) |
4195 | break; |
4196 | } |
4197 | |
4198 | count -= cluster_size; |
4199 | page_idx += cluster_size; |
4200 | |
4201 | cond_resched(); |
4202 | if (fatal_signal_pending(current)) { |
4203 | ret = -EINTR; |
4204 | break; |
4205 | } |
4206 | } |
4207 | |
4208 | if (!ret) |
4209 | ret = filemap_write_and_wait_range(mapping: inode->i_mapping, lstart: 0, |
4210 | LLONG_MAX); |
4211 | |
4212 | clear_inode_flag(inode, flag: FI_ENABLE_COMPRESS); |
4213 | |
4214 | if (ret) |
4215 | f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file." , |
4216 | __func__, ret); |
4217 | out: |
4218 | inode_unlock(inode); |
4219 | file_end_write(file: filp); |
4220 | |
4221 | return ret; |
4222 | } |
4223 | |
4224 | static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
4225 | { |
4226 | switch (cmd) { |
4227 | case FS_IOC_GETVERSION: |
4228 | return f2fs_ioc_getversion(filp, arg); |
4229 | case F2FS_IOC_START_ATOMIC_WRITE: |
4230 | return f2fs_ioc_start_atomic_write(filp, truncate: false); |
4231 | case F2FS_IOC_START_ATOMIC_REPLACE: |
4232 | return f2fs_ioc_start_atomic_write(filp, truncate: true); |
4233 | case F2FS_IOC_COMMIT_ATOMIC_WRITE: |
4234 | return f2fs_ioc_commit_atomic_write(filp); |
4235 | case F2FS_IOC_ABORT_ATOMIC_WRITE: |
4236 | return f2fs_ioc_abort_atomic_write(filp); |
4237 | case F2FS_IOC_START_VOLATILE_WRITE: |
4238 | case F2FS_IOC_RELEASE_VOLATILE_WRITE: |
4239 | return -EOPNOTSUPP; |
4240 | case F2FS_IOC_SHUTDOWN: |
4241 | return f2fs_ioc_shutdown(filp, arg); |
4242 | case FITRIM: |
4243 | return f2fs_ioc_fitrim(filp, arg); |
4244 | case FS_IOC_SET_ENCRYPTION_POLICY: |
4245 | return f2fs_ioc_set_encryption_policy(filp, arg); |
4246 | case FS_IOC_GET_ENCRYPTION_POLICY: |
4247 | return f2fs_ioc_get_encryption_policy(filp, arg); |
4248 | case FS_IOC_GET_ENCRYPTION_PWSALT: |
4249 | return f2fs_ioc_get_encryption_pwsalt(filp, arg); |
4250 | case FS_IOC_GET_ENCRYPTION_POLICY_EX: |
4251 | return f2fs_ioc_get_encryption_policy_ex(filp, arg); |
4252 | case FS_IOC_ADD_ENCRYPTION_KEY: |
4253 | return f2fs_ioc_add_encryption_key(filp, arg); |
4254 | case FS_IOC_REMOVE_ENCRYPTION_KEY: |
4255 | return f2fs_ioc_remove_encryption_key(filp, arg); |
4256 | case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: |
4257 | return f2fs_ioc_remove_encryption_key_all_users(filp, arg); |
4258 | case FS_IOC_GET_ENCRYPTION_KEY_STATUS: |
4259 | return f2fs_ioc_get_encryption_key_status(filp, arg); |
4260 | case FS_IOC_GET_ENCRYPTION_NONCE: |
4261 | return f2fs_ioc_get_encryption_nonce(filp, arg); |
4262 | case F2FS_IOC_GARBAGE_COLLECT: |
4263 | return f2fs_ioc_gc(filp, arg); |
4264 | case F2FS_IOC_GARBAGE_COLLECT_RANGE: |
4265 | return f2fs_ioc_gc_range(filp, arg); |
4266 | case F2FS_IOC_WRITE_CHECKPOINT: |
4267 | return f2fs_ioc_write_checkpoint(filp); |
4268 | case F2FS_IOC_DEFRAGMENT: |
4269 | return f2fs_ioc_defragment(filp, arg); |
4270 | case F2FS_IOC_MOVE_RANGE: |
4271 | return f2fs_ioc_move_range(filp, arg); |
4272 | case F2FS_IOC_FLUSH_DEVICE: |
4273 | return f2fs_ioc_flush_device(filp, arg); |
4274 | case F2FS_IOC_GET_FEATURES: |
4275 | return f2fs_ioc_get_features(filp, arg); |
4276 | case F2FS_IOC_GET_PIN_FILE: |
4277 | return f2fs_ioc_get_pin_file(filp, arg); |
4278 | case F2FS_IOC_SET_PIN_FILE: |
4279 | return f2fs_ioc_set_pin_file(filp, arg); |
4280 | case F2FS_IOC_PRECACHE_EXTENTS: |
4281 | return f2fs_ioc_precache_extents(filp); |
4282 | case F2FS_IOC_RESIZE_FS: |
4283 | return f2fs_ioc_resize_fs(filp, arg); |
4284 | case FS_IOC_ENABLE_VERITY: |
4285 | return f2fs_ioc_enable_verity(filp, arg); |
4286 | case FS_IOC_MEASURE_VERITY: |
4287 | return f2fs_ioc_measure_verity(filp, arg); |
4288 | case FS_IOC_READ_VERITY_METADATA: |
4289 | return f2fs_ioc_read_verity_metadata(filp, arg); |
4290 | case FS_IOC_GETFSLABEL: |
4291 | return f2fs_ioc_getfslabel(filp, arg); |
4292 | case FS_IOC_SETFSLABEL: |
4293 | return f2fs_ioc_setfslabel(filp, arg); |
4294 | case F2FS_IOC_GET_COMPRESS_BLOCKS: |
4295 | return f2fs_ioc_get_compress_blocks(filp, arg); |
4296 | case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: |
4297 | return f2fs_release_compress_blocks(filp, arg); |
4298 | case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: |
4299 | return f2fs_reserve_compress_blocks(filp, arg); |
4300 | case F2FS_IOC_SEC_TRIM_FILE: |
4301 | return f2fs_sec_trim_file(filp, arg); |
4302 | case F2FS_IOC_GET_COMPRESS_OPTION: |
4303 | return f2fs_ioc_get_compress_option(filp, arg); |
4304 | case F2FS_IOC_SET_COMPRESS_OPTION: |
4305 | return f2fs_ioc_set_compress_option(filp, arg); |
4306 | case F2FS_IOC_DECOMPRESS_FILE: |
4307 | return f2fs_ioc_decompress_file(filp); |
4308 | case F2FS_IOC_COMPRESS_FILE: |
4309 | return f2fs_ioc_compress_file(filp); |
4310 | default: |
4311 | return -ENOTTY; |
4312 | } |
4313 | } |
4314 | |
4315 | long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
4316 | { |
4317 | if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) |
4318 | return -EIO; |
4319 | if (!f2fs_is_checkpoint_ready(sbi: F2FS_I_SB(inode: file_inode(f: filp)))) |
4320 | return -ENOSPC; |
4321 | |
4322 | return __f2fs_ioctl(filp, cmd, arg); |
4323 | } |
4324 | |
4325 | /* |
4326 | * Return %true if the given read or write request should use direct I/O, or |
4327 | * %false if it should use buffered I/O. |
4328 | */ |
4329 | static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb, |
4330 | struct iov_iter *iter) |
4331 | { |
4332 | unsigned int align; |
4333 | |
4334 | if (!(iocb->ki_flags & IOCB_DIRECT)) |
4335 | return false; |
4336 | |
4337 | if (f2fs_force_buffered_io(inode, rw: iov_iter_rw(i: iter))) |
4338 | return false; |
4339 | |
4340 | /* |
4341 | * Direct I/O not aligned to the disk's logical_block_size will be |
4342 | * attempted, but will fail with -EINVAL. |
4343 | * |
4344 | * f2fs additionally requires that direct I/O be aligned to the |
4345 | * filesystem block size, which is often a stricter requirement. |
4346 | * However, f2fs traditionally falls back to buffered I/O on requests |
4347 | * that are logical_block_size-aligned but not fs-block aligned. |
4348 | * |
4349 | * The below logic implements this behavior. |
4350 | */ |
4351 | align = iocb->ki_pos | iov_iter_alignment(i: iter); |
4352 | if (!IS_ALIGNED(align, i_blocksize(inode)) && |
4353 | IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) |
4354 | return false; |
4355 | |
4356 | return true; |
4357 | } |
4358 | |
4359 | static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error, |
4360 | unsigned int flags) |
4361 | { |
4362 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: file_inode(f: iocb->ki_filp)); |
4363 | |
4364 | dec_page_count(sbi, count_type: F2FS_DIO_READ); |
4365 | if (error) |
4366 | return error; |
4367 | f2fs_update_iostat(sbi, NULL, type: APP_DIRECT_READ_IO, io_bytes: size); |
4368 | return 0; |
4369 | } |
4370 | |
4371 | static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = { |
4372 | .end_io = f2fs_dio_read_end_io, |
4373 | }; |
4374 | |
4375 | static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to) |
4376 | { |
4377 | struct file *file = iocb->ki_filp; |
4378 | struct inode *inode = file_inode(f: file); |
4379 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
4380 | struct f2fs_inode_info *fi = F2FS_I(inode); |
4381 | const loff_t pos = iocb->ki_pos; |
4382 | const size_t count = iov_iter_count(i: to); |
4383 | struct iomap_dio *dio; |
4384 | ssize_t ret; |
4385 | |
4386 | if (count == 0) |
4387 | return 0; /* skip atime update */ |
4388 | |
4389 | trace_f2fs_direct_IO_enter(inode, iocb, len: count, READ); |
4390 | |
4391 | if (iocb->ki_flags & IOCB_NOWAIT) { |
4392 | if (!f2fs_down_read_trylock(sem: &fi->i_gc_rwsem[READ])) { |
4393 | ret = -EAGAIN; |
4394 | goto out; |
4395 | } |
4396 | } else { |
4397 | f2fs_down_read(sem: &fi->i_gc_rwsem[READ]); |
4398 | } |
4399 | |
4400 | /* |
4401 | * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of |
4402 | * the higher-level function iomap_dio_rw() in order to ensure that the |
4403 | * F2FS_DIO_READ counter will be decremented correctly in all cases. |
4404 | */ |
4405 | inc_page_count(sbi, count_type: F2FS_DIO_READ); |
4406 | dio = __iomap_dio_rw(iocb, iter: to, ops: &f2fs_iomap_ops, |
4407 | dops: &f2fs_iomap_dio_read_ops, dio_flags: 0, NULL, done_before: 0); |
4408 | if (IS_ERR_OR_NULL(ptr: dio)) { |
4409 | ret = PTR_ERR_OR_ZERO(ptr: dio); |
4410 | if (ret != -EIOCBQUEUED) |
4411 | dec_page_count(sbi, count_type: F2FS_DIO_READ); |
4412 | } else { |
4413 | ret = iomap_dio_complete(dio); |
4414 | } |
4415 | |
4416 | f2fs_up_read(sem: &fi->i_gc_rwsem[READ]); |
4417 | |
4418 | file_accessed(file); |
4419 | out: |
4420 | trace_f2fs_direct_IO_exit(inode, offset: pos, len: count, READ, ret); |
4421 | return ret; |
4422 | } |
4423 | |
4424 | static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count, |
4425 | int rw) |
4426 | { |
4427 | struct inode *inode = file_inode(f: file); |
4428 | char *buf, *path; |
4429 | |
4430 | buf = f2fs_getname(sbi: F2FS_I_SB(inode)); |
4431 | if (!buf) |
4432 | return; |
4433 | path = dentry_path_raw(file_dentry(file), buf, PATH_MAX); |
4434 | if (IS_ERR(ptr: path)) |
4435 | goto free_buf; |
4436 | if (rw == WRITE) |
4437 | trace_f2fs_datawrite_start(inode, offset: pos, bytes: count, |
4438 | current->pid, pathname: path, current->comm); |
4439 | else |
4440 | trace_f2fs_dataread_start(inode, offset: pos, bytes: count, |
4441 | current->pid, pathname: path, current->comm); |
4442 | free_buf: |
4443 | f2fs_putname(buf); |
4444 | } |
4445 | |
4446 | static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
4447 | { |
4448 | struct inode *inode = file_inode(f: iocb->ki_filp); |
4449 | const loff_t pos = iocb->ki_pos; |
4450 | ssize_t ret; |
4451 | |
4452 | if (!f2fs_is_compress_backend_ready(inode)) |
4453 | return -EOPNOTSUPP; |
4454 | |
4455 | if (trace_f2fs_dataread_start_enabled()) |
4456 | f2fs_trace_rw_file_path(file: iocb->ki_filp, pos: iocb->ki_pos, |
4457 | count: iov_iter_count(i: to), READ); |
4458 | |
4459 | if (f2fs_should_use_dio(inode, iocb, iter: to)) { |
4460 | ret = f2fs_dio_read_iter(iocb, to); |
4461 | } else { |
4462 | ret = filemap_read(iocb, to, already_read: 0); |
4463 | if (ret > 0) |
4464 | f2fs_update_iostat(sbi: F2FS_I_SB(inode), inode, |
4465 | type: APP_BUFFERED_READ_IO, io_bytes: ret); |
4466 | } |
4467 | if (trace_f2fs_dataread_end_enabled()) |
4468 | trace_f2fs_dataread_end(inode, offset: pos, bytes: ret); |
4469 | return ret; |
4470 | } |
4471 | |
4472 | static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos, |
4473 | struct pipe_inode_info *pipe, |
4474 | size_t len, unsigned int flags) |
4475 | { |
4476 | struct inode *inode = file_inode(f: in); |
4477 | const loff_t pos = *ppos; |
4478 | ssize_t ret; |
4479 | |
4480 | if (!f2fs_is_compress_backend_ready(inode)) |
4481 | return -EOPNOTSUPP; |
4482 | |
4483 | if (trace_f2fs_dataread_start_enabled()) |
4484 | f2fs_trace_rw_file_path(file: in, pos, count: len, READ); |
4485 | |
4486 | ret = filemap_splice_read(in, ppos, pipe, len, flags); |
4487 | if (ret > 0) |
4488 | f2fs_update_iostat(sbi: F2FS_I_SB(inode), inode, |
4489 | type: APP_BUFFERED_READ_IO, io_bytes: ret); |
4490 | |
4491 | if (trace_f2fs_dataread_end_enabled()) |
4492 | trace_f2fs_dataread_end(inode, offset: pos, bytes: ret); |
4493 | return ret; |
4494 | } |
4495 | |
4496 | static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from) |
4497 | { |
4498 | struct file *file = iocb->ki_filp; |
4499 | struct inode *inode = file_inode(f: file); |
4500 | ssize_t count; |
4501 | int err; |
4502 | |
4503 | if (IS_IMMUTABLE(inode)) |
4504 | return -EPERM; |
4505 | |
4506 | if (is_inode_flag_set(inode, flag: FI_COMPRESS_RELEASED)) |
4507 | return -EPERM; |
4508 | |
4509 | count = generic_write_checks(iocb, from); |
4510 | if (count <= 0) |
4511 | return count; |
4512 | |
4513 | err = file_modified(file); |
4514 | if (err) |
4515 | return err; |
4516 | return count; |
4517 | } |
4518 | |
4519 | /* |
4520 | * Preallocate blocks for a write request, if it is possible and helpful to do |
4521 | * so. Returns a positive number if blocks may have been preallocated, 0 if no |
4522 | * blocks were preallocated, or a negative errno value if something went |
4523 | * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the |
4524 | * requested blocks (not just some of them) have been allocated. |
4525 | */ |
4526 | static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter, |
4527 | bool dio) |
4528 | { |
4529 | struct inode *inode = file_inode(f: iocb->ki_filp); |
4530 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
4531 | const loff_t pos = iocb->ki_pos; |
4532 | const size_t count = iov_iter_count(i: iter); |
4533 | struct f2fs_map_blocks map = {}; |
4534 | int flag; |
4535 | int ret; |
4536 | |
4537 | /* If it will be an out-of-place direct write, don't bother. */ |
4538 | if (dio && f2fs_lfs_mode(sbi)) |
4539 | return 0; |
4540 | /* |
* Don't preallocate (fill) holes below EOF for direct I/O; a DIO write
* that meets a hole is turned into buffered I/O instead (DIO_SKIP_HOLES
* behavior).
4543 | */ |
4544 | if (dio && i_size_read(inode) && |
4545 | (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode)))) |
4546 | return 0; |
4547 | |
4548 | /* No-wait I/O can't allocate blocks. */ |
4549 | if (iocb->ki_flags & IOCB_NOWAIT) |
4550 | return 0; |
4551 | |
4552 | /* If it will be a short write, don't bother. */ |
4553 | if (fault_in_iov_iter_readable(i: iter, bytes: count)) |
4554 | return 0; |
4555 | |
4556 | if (f2fs_has_inline_data(inode)) { |
4557 | /* If the data will fit inline, don't bother. */ |
4558 | if (pos + count <= MAX_INLINE_DATA(inode)) |
4559 | return 0; |
4560 | ret = f2fs_convert_inline_inode(inode); |
4561 | if (ret) |
4562 | return ret; |
4563 | } |
4564 | |
/* Do not preallocate blocks that will only be partially overwritten (4KB block granularity). */
4566 | map.m_lblk = F2FS_BLK_ALIGN(pos); |
4567 | map.m_len = F2FS_BYTES_TO_BLK(pos + count); |
4568 | if (map.m_len > map.m_lblk) |
4569 | map.m_len -= map.m_lblk; |
4570 | else |
4571 | map.m_len = 0; |
4572 | map.m_may_create = true; |
4573 | if (dio) { |
4574 | map.m_seg_type = f2fs_rw_hint_to_seg_type(hint: inode->i_write_hint); |
4575 | flag = F2FS_GET_BLOCK_PRE_DIO; |
4576 | } else { |
4577 | map.m_seg_type = NO_CHECK_TYPE; |
4578 | flag = F2FS_GET_BLOCK_PRE_AIO; |
4579 | } |
4580 | |
4581 | ret = f2fs_map_blocks(inode, map: &map, flag); |
/* -ENOSPC and -EDQUOT are fine: still report the number of blocks that were allocated. */
4583 | if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0)) |
4584 | return ret; |
4585 | if (ret == 0) |
4586 | set_inode_flag(inode, flag: FI_PREALLOCATED_ALL); |
4587 | return map.m_len; |
4588 | } |
4589 | |
4590 | static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, |
4591 | struct iov_iter *from) |
4592 | { |
4593 | struct file *file = iocb->ki_filp; |
4594 | struct inode *inode = file_inode(f: file); |
4595 | ssize_t ret; |
4596 | |
4597 | if (iocb->ki_flags & IOCB_NOWAIT) |
4598 | return -EOPNOTSUPP; |
4599 | |
4600 | ret = generic_perform_write(iocb, from); |
4601 | |
4602 | if (ret > 0) { |
4603 | f2fs_update_iostat(sbi: F2FS_I_SB(inode), inode, |
4604 | type: APP_BUFFERED_IO, io_bytes: ret); |
4605 | } |
4606 | return ret; |
4607 | } |
4608 | |
4609 | static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error, |
4610 | unsigned int flags) |
4611 | { |
4612 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode: file_inode(f: iocb->ki_filp)); |
4613 | |
4614 | dec_page_count(sbi, count_type: F2FS_DIO_WRITE); |
4615 | if (error) |
4616 | return error; |
4617 | f2fs_update_time(sbi, type: REQ_TIME); |
4618 | f2fs_update_iostat(sbi, NULL, type: APP_DIRECT_IO, io_bytes: size); |
4619 | return 0; |
4620 | } |
4621 | |
4622 | static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = { |
4623 | .end_io = f2fs_dio_write_end_io, |
4624 | }; |
4625 | |
4626 | static void f2fs_flush_buffered_write(struct address_space *mapping, |
4627 | loff_t start_pos, loff_t end_pos) |
4628 | { |
4629 | int ret; |
4630 | |
4631 | ret = filemap_write_and_wait_range(mapping, lstart: start_pos, lend: end_pos); |
4632 | if (ret < 0) |
4633 | return; |
4634 | invalidate_mapping_pages(mapping, |
4635 | start: start_pos >> PAGE_SHIFT, |
4636 | end: end_pos >> PAGE_SHIFT); |
4637 | } |
4638 | |
4639 | static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from, |
4640 | bool *may_need_sync) |
4641 | { |
4642 | struct file *file = iocb->ki_filp; |
4643 | struct inode *inode = file_inode(f: file); |
4644 | struct f2fs_inode_info *fi = F2FS_I(inode); |
4645 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
4646 | const bool do_opu = f2fs_lfs_mode(sbi); |
4647 | const loff_t pos = iocb->ki_pos; |
4648 | const ssize_t count = iov_iter_count(i: from); |
4649 | unsigned int dio_flags; |
4650 | struct iomap_dio *dio; |
4651 | ssize_t ret; |
4652 | |
4653 | trace_f2fs_direct_IO_enter(inode, iocb, len: count, WRITE); |
4654 | |
4655 | if (iocb->ki_flags & IOCB_NOWAIT) { |
4656 | /* f2fs_convert_inline_inode() and block allocation can block */ |
4657 | if (f2fs_has_inline_data(inode) || |
4658 | !f2fs_overwrite_io(inode, pos, len: count)) { |
4659 | ret = -EAGAIN; |
4660 | goto out; |
4661 | } |
4662 | |
4663 | if (!f2fs_down_read_trylock(sem: &fi->i_gc_rwsem[WRITE])) { |
4664 | ret = -EAGAIN; |
4665 | goto out; |
4666 | } |
4667 | if (do_opu && !f2fs_down_read_trylock(sem: &fi->i_gc_rwsem[READ])) { |
4668 | f2fs_up_read(sem: &fi->i_gc_rwsem[WRITE]); |
4669 | ret = -EAGAIN; |
4670 | goto out; |
4671 | } |
4672 | } else { |
4673 | ret = f2fs_convert_inline_inode(inode); |
4674 | if (ret) |
4675 | goto out; |
4676 | |
4677 | f2fs_down_read(sem: &fi->i_gc_rwsem[WRITE]); |
4678 | if (do_opu) |
4679 | f2fs_down_read(sem: &fi->i_gc_rwsem[READ]); |
4680 | } |
4681 | |
4682 | /* |
4683 | * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of |
4684 | * the higher-level function iomap_dio_rw() in order to ensure that the |
4685 | * F2FS_DIO_WRITE counter will be decremented correctly in all cases. |
4686 | */ |
4687 | inc_page_count(sbi, count_type: F2FS_DIO_WRITE); |
4688 | dio_flags = 0; |
4689 | if (pos + count > inode->i_size) |
4690 | dio_flags |= IOMAP_DIO_FORCE_WAIT; |
4691 | dio = __iomap_dio_rw(iocb, iter: from, ops: &f2fs_iomap_ops, |
4692 | dops: &f2fs_iomap_dio_write_ops, dio_flags, NULL, done_before: 0); |
4693 | if (IS_ERR_OR_NULL(ptr: dio)) { |
4694 | ret = PTR_ERR_OR_ZERO(ptr: dio); |
4695 | if (ret == -ENOTBLK) |
4696 | ret = 0; |
4697 | if (ret != -EIOCBQUEUED) |
4698 | dec_page_count(sbi, count_type: F2FS_DIO_WRITE); |
4699 | } else { |
4700 | ret = iomap_dio_complete(dio); |
4701 | } |
4702 | |
4703 | if (do_opu) |
4704 | f2fs_up_read(sem: &fi->i_gc_rwsem[READ]); |
4705 | f2fs_up_read(sem: &fi->i_gc_rwsem[WRITE]); |
4706 | |
4707 | if (ret < 0) |
4708 | goto out; |
4709 | if (pos + ret > inode->i_size) |
4710 | f2fs_i_size_write(inode, i_size: pos + ret); |
4711 | if (!do_opu) |
4712 | set_inode_flag(inode, flag: FI_UPDATE_WRITE); |
4713 | |
4714 | if (iov_iter_count(i: from)) { |
4715 | ssize_t ret2; |
4716 | loff_t bufio_start_pos = iocb->ki_pos; |
4717 | |
4718 | /* |
4719 | * The direct write was partial, so we need to fall back to a |
4720 | * buffered write for the remainder. |
4721 | */ |
4722 | |
4723 | ret2 = f2fs_buffered_write_iter(iocb, from); |
4724 | if (iov_iter_count(i: from)) |
4725 | f2fs_write_failed(inode, to: iocb->ki_pos); |
4726 | if (ret2 < 0) |
4727 | goto out; |
4728 | |
4729 | /* |
4730 | * Ensure that the pagecache pages are written to disk and |
4731 | * invalidated to preserve the expected O_DIRECT semantics. |
4732 | */ |
4733 | if (ret2 > 0) { |
4734 | loff_t bufio_end_pos = bufio_start_pos + ret2 - 1; |
4735 | |
4736 | ret += ret2; |
4737 | |
4738 | f2fs_flush_buffered_write(mapping: file->f_mapping, |
4739 | start_pos: bufio_start_pos, |
4740 | end_pos: bufio_end_pos); |
4741 | } |
4742 | } else { |
4743 | /* iomap_dio_rw() already handled the generic_write_sync(). */ |
4744 | *may_need_sync = false; |
4745 | } |
4746 | out: |
4747 | trace_f2fs_direct_IO_exit(inode, offset: pos, len: count, WRITE, ret); |
4748 | return ret; |
4749 | } |
4750 | |
4751 | static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
4752 | { |
4753 | struct inode *inode = file_inode(f: iocb->ki_filp); |
4754 | const loff_t orig_pos = iocb->ki_pos; |
4755 | const size_t orig_count = iov_iter_count(i: from); |
4756 | loff_t target_size; |
4757 | bool dio; |
4758 | bool may_need_sync = true; |
4759 | int preallocated; |
4760 | ssize_t ret; |
4761 | |
4762 | if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { |
4763 | ret = -EIO; |
4764 | goto out; |
4765 | } |
4766 | |
4767 | if (!f2fs_is_compress_backend_ready(inode)) { |
4768 | ret = -EOPNOTSUPP; |
4769 | goto out; |
4770 | } |
4771 | |
4772 | if (iocb->ki_flags & IOCB_NOWAIT) { |
4773 | if (!inode_trylock(inode)) { |
4774 | ret = -EAGAIN; |
4775 | goto out; |
4776 | } |
4777 | } else { |
4778 | inode_lock(inode); |
4779 | } |
4780 | |
4781 | ret = f2fs_write_checks(iocb, from); |
4782 | if (ret <= 0) |
4783 | goto out_unlock; |
4784 | |
4785 | /* Determine whether we will do a direct write or a buffered write. */ |
4786 | dio = f2fs_should_use_dio(inode, iocb, iter: from); |
4787 | |
4788 | /* Possibly preallocate the blocks for the write. */ |
4789 | target_size = iocb->ki_pos + iov_iter_count(i: from); |
4790 | preallocated = f2fs_preallocate_blocks(iocb, iter: from, dio); |
4791 | if (preallocated < 0) { |
4792 | ret = preallocated; |
4793 | } else { |
4794 | if (trace_f2fs_datawrite_start_enabled()) |
4795 | f2fs_trace_rw_file_path(file: iocb->ki_filp, pos: iocb->ki_pos, |
4796 | count: orig_count, WRITE); |
4797 | |
4798 | /* Do the actual write. */ |
4799 | ret = dio ? |
4800 | f2fs_dio_write_iter(iocb, from, may_need_sync: &may_need_sync) : |
4801 | f2fs_buffered_write_iter(iocb, from); |
4802 | |
4803 | if (trace_f2fs_datawrite_end_enabled()) |
4804 | trace_f2fs_datawrite_end(inode, offset: orig_pos, bytes: ret); |
4805 | } |
4806 | |
4807 | /* Don't leave any preallocated blocks around past i_size. */ |
4808 | if (preallocated && i_size_read(inode) < target_size) { |
4809 | f2fs_down_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
4810 | filemap_invalidate_lock(mapping: inode->i_mapping); |
4811 | if (!f2fs_truncate(inode)) |
4812 | file_dont_truncate(inode); |
4813 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
4814 | f2fs_up_write(sem: &F2FS_I(inode)->i_gc_rwsem[WRITE]); |
4815 | } else { |
4816 | file_dont_truncate(inode); |
4817 | } |
4818 | |
4819 | clear_inode_flag(inode, flag: FI_PREALLOCATED_ALL); |
4820 | out_unlock: |
4821 | inode_unlock(inode); |
4822 | out: |
4823 | trace_f2fs_file_write_iter(inode, offset: orig_pos, length: orig_count, ret); |
4824 | |
4825 | if (ret > 0 && may_need_sync) |
4826 | ret = generic_write_sync(iocb, count: ret); |
4827 | |
/*
 * If buffered I/O was forced, flush and drop the data from the page
 * cache to preserve O_DIRECT semantics.
 */
4831 | if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT)) |
4832 | f2fs_flush_buffered_write(mapping: iocb->ki_filp->f_mapping, |
4833 | start_pos: orig_pos, |
4834 | end_pos: orig_pos + ret - 1); |
4835 | |
4836 | return ret; |
4837 | } |
4838 | |
4839 | static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len, |
4840 | int advice) |
4841 | { |
4842 | struct address_space *mapping; |
4843 | struct backing_dev_info *bdi; |
4844 | struct inode *inode = file_inode(f: filp); |
4845 | int err; |
4846 | |
4847 | if (advice == POSIX_FADV_SEQUENTIAL) { |
4848 | if (S_ISFIFO(inode->i_mode)) |
4849 | return -ESPIPE; |
4850 | |
4851 | mapping = filp->f_mapping; |
4852 | if (!mapping || len < 0) |
4853 | return -EINVAL; |
4854 | |
4855 | bdi = inode_to_bdi(inode: mapping->host); |
4856 | filp->f_ra.ra_pages = bdi->ra_pages * |
4857 | F2FS_I_SB(inode)->seq_file_ra_mul; |
4858 | spin_lock(lock: &filp->f_lock); |
4859 | filp->f_mode &= ~FMODE_RANDOM; |
4860 | spin_unlock(lock: &filp->f_lock); |
4861 | return 0; |
4862 | } else if (advice == POSIX_FADV_WILLNEED && offset == 0) { |
4863 | /* Load extent cache at the first readahead. */ |
4864 | f2fs_precache_extents(inode); |
4865 | } |
4866 | |
4867 | err = generic_fadvise(file: filp, offset, len, advice); |
4868 | if (!err && advice == POSIX_FADV_DONTNEED && |
4869 | test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) && |
4870 | f2fs_compressed_file(inode)) |
4871 | f2fs_invalidate_compress_pages(sbi: F2FS_I_SB(inode), ino: inode->i_ino); |
4872 | |
4873 | return err; |
4874 | } |
4875 | |
4876 | #ifdef CONFIG_COMPAT |
4877 | struct compat_f2fs_gc_range { |
4878 | u32 sync; |
4879 | compat_u64 start; |
4880 | compat_u64 len; |
4881 | }; |
4882 | #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\ |
4883 | struct compat_f2fs_gc_range) |
4884 | |
4885 | static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg) |
4886 | { |
4887 | struct compat_f2fs_gc_range __user *urange; |
4888 | struct f2fs_gc_range range; |
4889 | int err; |
4890 | |
4891 | urange = compat_ptr(uptr: arg); |
4892 | err = get_user(range.sync, &urange->sync); |
4893 | err |= get_user(range.start, &urange->start); |
4894 | err |= get_user(range.len, &urange->len); |
4895 | if (err) |
4896 | return -EFAULT; |
4897 | |
4898 | return __f2fs_ioc_gc_range(filp: file, range: &range); |
4899 | } |
4900 | |
4901 | struct compat_f2fs_move_range { |
4902 | u32 dst_fd; |
4903 | compat_u64 pos_in; |
4904 | compat_u64 pos_out; |
4905 | compat_u64 len; |
4906 | }; |
4907 | #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \ |
4908 | struct compat_f2fs_move_range) |
4909 | |
4910 | static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg) |
4911 | { |
4912 | struct compat_f2fs_move_range __user *urange; |
4913 | struct f2fs_move_range range; |
4914 | int err; |
4915 | |
4916 | urange = compat_ptr(uptr: arg); |
4917 | err = get_user(range.dst_fd, &urange->dst_fd); |
4918 | err |= get_user(range.pos_in, &urange->pos_in); |
4919 | err |= get_user(range.pos_out, &urange->pos_out); |
4920 | err |= get_user(range.len, &urange->len); |
4921 | if (err) |
4922 | return -EFAULT; |
4923 | |
4924 | return __f2fs_ioc_move_range(filp: file, range: &range); |
4925 | } |
4926 | |
4927 | long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
4928 | { |
4929 | if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) |
4930 | return -EIO; |
4931 | if (!f2fs_is_checkpoint_ready(sbi: F2FS_I_SB(inode: file_inode(f: file)))) |
4932 | return -ENOSPC; |
4933 | |
4934 | switch (cmd) { |
4935 | case FS_IOC32_GETVERSION: |
4936 | cmd = FS_IOC_GETVERSION; |
4937 | break; |
4938 | case F2FS_IOC32_GARBAGE_COLLECT_RANGE: |
4939 | return f2fs_compat_ioc_gc_range(file, arg); |
4940 | case F2FS_IOC32_MOVE_RANGE: |
4941 | return f2fs_compat_ioc_move_range(file, arg); |
4942 | case F2FS_IOC_START_ATOMIC_WRITE: |
4943 | case F2FS_IOC_START_ATOMIC_REPLACE: |
4944 | case F2FS_IOC_COMMIT_ATOMIC_WRITE: |
4945 | case F2FS_IOC_START_VOLATILE_WRITE: |
4946 | case F2FS_IOC_RELEASE_VOLATILE_WRITE: |
4947 | case F2FS_IOC_ABORT_ATOMIC_WRITE: |
4948 | case F2FS_IOC_SHUTDOWN: |
4949 | case FITRIM: |
4950 | case FS_IOC_SET_ENCRYPTION_POLICY: |
4951 | case FS_IOC_GET_ENCRYPTION_PWSALT: |
4952 | case FS_IOC_GET_ENCRYPTION_POLICY: |
4953 | case FS_IOC_GET_ENCRYPTION_POLICY_EX: |
4954 | case FS_IOC_ADD_ENCRYPTION_KEY: |
4955 | case FS_IOC_REMOVE_ENCRYPTION_KEY: |
4956 | case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: |
4957 | case FS_IOC_GET_ENCRYPTION_KEY_STATUS: |
4958 | case FS_IOC_GET_ENCRYPTION_NONCE: |
4959 | case F2FS_IOC_GARBAGE_COLLECT: |
4960 | case F2FS_IOC_WRITE_CHECKPOINT: |
4961 | case F2FS_IOC_DEFRAGMENT: |
4962 | case F2FS_IOC_FLUSH_DEVICE: |
4963 | case F2FS_IOC_GET_FEATURES: |
4964 | case F2FS_IOC_GET_PIN_FILE: |
4965 | case F2FS_IOC_SET_PIN_FILE: |
4966 | case F2FS_IOC_PRECACHE_EXTENTS: |
4967 | case F2FS_IOC_RESIZE_FS: |
4968 | case FS_IOC_ENABLE_VERITY: |
4969 | case FS_IOC_MEASURE_VERITY: |
4970 | case FS_IOC_READ_VERITY_METADATA: |
4971 | case FS_IOC_GETFSLABEL: |
4972 | case FS_IOC_SETFSLABEL: |
4973 | case F2FS_IOC_GET_COMPRESS_BLOCKS: |
4974 | case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: |
4975 | case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: |
4976 | case F2FS_IOC_SEC_TRIM_FILE: |
4977 | case F2FS_IOC_GET_COMPRESS_OPTION: |
4978 | case F2FS_IOC_SET_COMPRESS_OPTION: |
4979 | case F2FS_IOC_DECOMPRESS_FILE: |
4980 | case F2FS_IOC_COMPRESS_FILE: |
4981 | break; |
4982 | default: |
4983 | return -ENOIOCTLCMD; |
4984 | } |
4985 | return __f2fs_ioctl(filp: file, cmd, arg: (unsigned long) compat_ptr(uptr: arg)); |
4986 | } |
4987 | #endif |
4988 | |
4989 | const struct file_operations f2fs_file_operations = { |
4990 | .llseek = f2fs_llseek, |
4991 | .read_iter = f2fs_file_read_iter, |
4992 | .write_iter = f2fs_file_write_iter, |
4993 | .iopoll = iocb_bio_iopoll, |
4994 | .open = f2fs_file_open, |
4995 | .release = f2fs_release_file, |
4996 | .mmap = f2fs_file_mmap, |
4997 | .flush = f2fs_file_flush, |
4998 | .fsync = f2fs_sync_file, |
4999 | .fallocate = f2fs_fallocate, |
5000 | .unlocked_ioctl = f2fs_ioctl, |
5001 | #ifdef CONFIG_COMPAT |
5002 | .compat_ioctl = f2fs_compat_ioctl, |
5003 | #endif |
5004 | .splice_read = f2fs_file_splice_read, |
5005 | .splice_write = iter_file_splice_write, |
5006 | .fadvise = f2fs_file_fadvise, |
5007 | }; |
5008 | |