// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

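/*
 * ufs_block_to_path() - translate a block number into the chain of
 * offsets to follow from the inode to that block.  Returns the depth
 * of the chain (1 for direct, up to 4 for triple indirect), or 0 if
 * the block is out of range.
 *
 * For illustration only (the exact numbers depend on the block size):
 * with uspi->s_apb == 2048 pointers per indirect block and
 * UFS_NDADDR == 12, block 11 maps to {11}, block 12 maps to
 * {UFS_IND_BLOCK, 0}, and block 12 + 2048 maps to {UFS_DIND_BLOCK, 0, 0}.
 */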
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

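/*
 * The Indirect chain below mirrors the scheme this file inherits from
 * ext2 (see the header comment): @p points at the on-disk pointer (in
 * the inode, or inside the indirect block held by @bh), and
 * key32/key64 caches the value seen there, so the whole chain can
 * later be re-verified against concurrent truncation.
 */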
typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

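/*
 * grow_chain32()/grow_chain64() append one entry to the chain under
 * ufsi->meta_lock (a seqlock): the new key is sampled, and every key
 * collected so far is re-checked against its on-disk location.  A
 * zero return means the tree changed under us and the caller must
 * restart the walk.
 */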
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.  If the pointer chain changes
 * under us (detected via ufsi->meta_lock), the walk is restarted
 * from scratch.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with a partial final block and
 * we have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
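/*
 * A worked example (assuming eight fragments per block, so
 * uspi->s_fpbmask == 7): with i_lastfrag == 19 the tail occupies
 * fragments 16..18.  A write to fragment 20 extends the tail to five
 * fragments (new_size == 5); a write to fragment 23 or beyond unpacks
 * it to the full block (new_size == s_fpb).
 */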
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set if something goes wrong
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

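	/*
	 * Goal-directed allocation: try to place the new block right
	 * after the one referenced by the previous direct pointer, so
	 * the file stays as contiguous as possible.
	 */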
	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	 */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of the newly allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		   unsigned index, sector_t new_fragment, int *err,
		   int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

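	/*
	 * First do a plain lookup.  Readers need nothing more; writers
	 * can also use an existing mapping as-is, unless it is a direct
	 * fragment at or beyond i_lastfrag, i.e. part of a tail that
	 * may have to be reallocated.
	 */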
	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* From this point on we are allocating, i.e. handling a write. */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						    fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					    fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = ufs_read_folio,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode_set_atime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
			0);
	inode_set_ctime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
			0);
	inode_set_mtime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
			0);
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
			fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
	inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
			fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
	inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
			fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
						 inode_get_atime_sec(inode));
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
						 inode_get_ctime_sec(inode));
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
						 inode_get_mtime_sec(inode));
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
	ufs_inode->ui_atimensec = cpu_to_fs32(sb,
					      inode_get_atime_nsec(inode));
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
					      inode_get_ctime_nsec(inode));
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
					      inode_get_mtime_nsec(inode));

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse(bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

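/*
 * free_data() batches runs of adjacent blocks so that a contiguous
 * extent is handed to ufs_free_blocks() in a single call; a pending
 * run is flushed as soon as a non-adjacent block arrives.  Calling
 * free_data(ctx, 0, 0) flushes the final run.
 */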
static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

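/*
 * Free the tail of the direct area, i.e. everything mapped by the
 * direct pointers past the new end of file.  The range splits into
 * three parts:
 *
 *	frag1 .. frag2-1	leading fragments of a partial block
 *	block1 .. block2-1	whole blocks, freed block by block
 *	frag3 .. frag4-1	trailing fragments of the last block
 */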
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free the fragments of the first partial block
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free the fragments of the last partial block
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
 next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

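/*
 * Free an entire indirect branch: for depth > 1 recurse into every
 * live sub-branch, otherwise release the data blocks (coalesced via
 * free_data()), then forget the indirect block's buffer and free the
 * indirect block itself.
 */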
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

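/*
 * Called from ufs_truncate(): make sure the block that will hold the
 * new last fragment of a file of size @size is really allocated.
 * Past the direct area whole blocks are always allocated, so the
 * fragments of that block beyond the new EOF are zeroed on disk to
 * avoid stale data reappearing if the file grows again.
 */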
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct folio *folio;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	folio = ufs_get_locked_folio(mapping, lastfrag >>
				     (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(folio)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = folio_buffers(folio);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * We do not zero the fragment: if it was mapped to a
		 * hole, it already contains zeroes.
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		folio_mark_dirty(folio);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_folio(folio);
out:
	return err;
}

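/*
 * Free everything past the current i_size: first trim the partially
 * kept blocks along the path to the new last block (the direct area,
 * or the tails of indirect branches), then release every branch that
 * is no longer referenced at all.  i_lastfrag is updated under
 * meta_lock when done.
 */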
static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};