1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include "bcachefs.h" |
3 | #include "bkey_buf.h" |
4 | #include "btree_update.h" |
5 | #include "buckets.h" |
6 | #include "error.h" |
7 | #include "extents.h" |
8 | #include "inode.h" |
9 | #include "io_misc.h" |
10 | #include "io_write.h" |
11 | #include "rebalance.h" |
12 | #include "reflink.h" |
13 | #include "subvolume.h" |
14 | #include "super-io.h" |
15 | |
16 | #include <linux/sched/signal.h> |
17 | |
18 | static inline unsigned bkey_type_to_indirect(const struct bkey *k) |
19 | { |
20 | switch (k->type) { |
21 | case KEY_TYPE_extent: |
22 | return KEY_TYPE_reflink_v; |
23 | case KEY_TYPE_inline_data: |
24 | return KEY_TYPE_indirect_inline_data; |
25 | default: |
26 | return 0; |
27 | } |
28 | } |
29 | |
30 | /* reflink pointers */ |
31 | |
/*
 * Validate a reflink pointer key: front_pad may never exceed idx, since the
 * padded range (idx - front_pad) must not underflow below index 0 in the
 * reflink btree.
 *
 * Returns 0 if valid; bkey_fsck_err_on() sets ret and jumps to fsck_err on
 * failure.
 */
int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	int ret = 0;

	bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad),
			 c, err, reflink_p_front_pad_bad,
			 "idx < front_pad (%llu < %u)" ,
			 le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
fsck_err:
	return ret;
}
46 | |
47 | void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c, |
48 | struct bkey_s_c k) |
49 | { |
50 | struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); |
51 | |
52 | prt_printf(out, "idx %llu front_pad %u back_pad %u" , |
53 | le64_to_cpu(p.v->idx), |
54 | le32_to_cpu(p.v->front_pad), |
55 | le32_to_cpu(p.v->back_pad)); |
56 | } |
57 | |
58 | bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) |
59 | { |
60 | struct bkey_s_reflink_p l = bkey_s_to_reflink_p(k: _l); |
61 | struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(k: _r); |
62 | |
63 | /* |
64 | * Disabled for now, the triggers code needs to be reworked for merging |
65 | * of reflink pointers to work: |
66 | */ |
67 | return false; |
68 | |
69 | if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx)) |
70 | return false; |
71 | |
72 | bch2_key_resize(k: l.k, new_size: l.k->size + r.k->size); |
73 | return true; |
74 | } |
75 | |
76 | static int trans_trigger_reflink_p_segment(struct btree_trans *trans, |
77 | struct bkey_s_c_reflink_p p, |
78 | u64 *idx, unsigned flags) |
79 | { |
80 | struct bch_fs *c = trans->c; |
81 | struct btree_iter iter; |
82 | struct bkey_i *k; |
83 | __le64 *refcount; |
84 | int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1; |
85 | struct printbuf buf = PRINTBUF; |
86 | int ret; |
87 | |
88 | k = bch2_bkey_get_mut_noupdate(trans, iter: &iter, |
89 | btree_id: BTREE_ID_reflink, POS(0, *idx), |
90 | flags: BTREE_ITER_WITH_UPDATES); |
91 | ret = PTR_ERR_OR_ZERO(ptr: k); |
92 | if (ret) |
93 | goto err; |
94 | |
95 | refcount = bkey_refcount(k: bkey_i_to_s(k)); |
96 | if (!refcount) { |
97 | bch2_bkey_val_to_text(&buf, c, p.s_c); |
98 | bch2_trans_inconsistent(trans, |
99 | "nonexistent indirect extent at %llu while marking\n %s" , |
100 | *idx, buf.buf); |
101 | ret = -EIO; |
102 | goto err; |
103 | } |
104 | |
105 | if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) { |
106 | bch2_bkey_val_to_text(&buf, c, p.s_c); |
107 | bch2_trans_inconsistent(trans, |
108 | "indirect extent refcount underflow at %llu while marking\n %s" , |
109 | *idx, buf.buf); |
110 | ret = -EIO; |
111 | goto err; |
112 | } |
113 | |
114 | if (flags & BTREE_TRIGGER_INSERT) { |
115 | struct bch_reflink_p *v = (struct bch_reflink_p *) p.v; |
116 | u64 pad; |
117 | |
118 | pad = max_t(s64, le32_to_cpu(v->front_pad), |
119 | le64_to_cpu(v->idx) - bkey_start_offset(&k->k)); |
120 | BUG_ON(pad > U32_MAX); |
121 | v->front_pad = cpu_to_le32(pad); |
122 | |
123 | pad = max_t(s64, le32_to_cpu(v->back_pad), |
124 | k->k.p.offset - p.k->size - le64_to_cpu(v->idx)); |
125 | BUG_ON(pad > U32_MAX); |
126 | v->back_pad = cpu_to_le32(pad); |
127 | } |
128 | |
129 | le64_add_cpu(var: refcount, val: add); |
130 | |
131 | bch2_btree_iter_set_pos_to_extent_start(iter: &iter); |
132 | ret = bch2_trans_update(trans, &iter, k, 0); |
133 | if (ret) |
134 | goto err; |
135 | |
136 | *idx = k->k.p.offset; |
137 | err: |
138 | bch2_trans_iter_exit(trans, &iter); |
139 | printbuf_exit(&buf); |
140 | return ret; |
141 | } |
142 | |
143 | static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, |
144 | struct bkey_s_c_reflink_p p, |
145 | u64 *idx, unsigned flags, size_t r_idx) |
146 | { |
147 | struct bch_fs *c = trans->c; |
148 | struct reflink_gc *r; |
149 | int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1; |
150 | u64 start = le64_to_cpu(p.v->idx); |
151 | u64 end = le64_to_cpu(p.v->idx) + p.k->size; |
152 | u64 next_idx = end + le32_to_cpu(p.v->back_pad); |
153 | s64 ret = 0; |
154 | struct printbuf buf = PRINTBUF; |
155 | |
156 | if (r_idx >= c->reflink_gc_nr) |
157 | goto not_found; |
158 | |
159 | r = genradix_ptr(&c->reflink_gc_table, r_idx); |
160 | next_idx = min(next_idx, r->offset - r->size); |
161 | if (*idx < next_idx) |
162 | goto not_found; |
163 | |
164 | BUG_ON((s64) r->refcount + add < 0); |
165 | |
166 | r->refcount += add; |
167 | *idx = r->offset; |
168 | return 0; |
169 | not_found: |
170 | if (fsck_err(c, reflink_p_to_missing_reflink_v, |
171 | "pointer to missing indirect extent\n" |
172 | " %s\n" |
173 | " missing range %llu-%llu" , |
174 | (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), |
175 | *idx, next_idx)) { |
176 | struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, k: p.s_c); |
177 | ret = PTR_ERR_OR_ZERO(ptr: update); |
178 | if (ret) |
179 | goto err; |
180 | |
181 | if (next_idx <= start) { |
182 | bkey_i_to_reflink_p(k: update)->v.front_pad = cpu_to_le32(start - next_idx); |
183 | } else if (*idx >= end) { |
184 | bkey_i_to_reflink_p(k: update)->v.back_pad = cpu_to_le32(*idx - end); |
185 | } else { |
186 | bkey_error_init(k: update); |
187 | update->k.p = p.k->p; |
188 | update->k.size = p.k->size; |
189 | set_bkey_val_u64s(k: &update->k, val_u64s: 0); |
190 | } |
191 | |
192 | ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_NORUN); |
193 | } |
194 | |
195 | *idx = next_idx; |
196 | err: |
197 | fsck_err: |
198 | printbuf_exit(&buf); |
199 | return ret; |
200 | } |
201 | |
202 | static int __trigger_reflink_p(struct btree_trans *trans, |
203 | enum btree_id btree_id, unsigned level, |
204 | struct bkey_s_c k, unsigned flags) |
205 | { |
206 | struct bch_fs *c = trans->c; |
207 | struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); |
208 | int ret = 0; |
209 | |
210 | u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad); |
211 | u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad); |
212 | |
213 | if (flags & BTREE_TRIGGER_TRANSACTIONAL) { |
214 | while (idx < end && !ret) |
215 | ret = trans_trigger_reflink_p_segment(trans, p, idx: &idx, flags); |
216 | } |
217 | |
218 | if (flags & BTREE_TRIGGER_GC) { |
219 | size_t l = 0, r = c->reflink_gc_nr; |
220 | |
221 | while (l < r) { |
222 | size_t m = l + (r - l) / 2; |
223 | struct reflink_gc *ref = genradix_ptr(&c->reflink_gc_table, m); |
224 | if (ref->offset <= idx) |
225 | l = m + 1; |
226 | else |
227 | r = m; |
228 | } |
229 | |
230 | while (idx < end && !ret) |
231 | ret = gc_trigger_reflink_p_segment(trans, p, idx: &idx, flags, r_idx: l++); |
232 | } |
233 | |
234 | return ret; |
235 | } |
236 | |
237 | int bch2_trigger_reflink_p(struct btree_trans *trans, |
238 | enum btree_id btree_id, unsigned level, |
239 | struct bkey_s_c old, |
240 | struct bkey_s new, |
241 | unsigned flags) |
242 | { |
243 | if ((flags & BTREE_TRIGGER_TRANSACTIONAL) && |
244 | (flags & BTREE_TRIGGER_INSERT)) { |
245 | struct bch_reflink_p *v = bkey_s_to_reflink_p(k: new).v; |
246 | |
247 | v->front_pad = v->back_pad = 0; |
248 | } |
249 | |
250 | return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags); |
251 | } |
252 | |
253 | /* indirect extents */ |
254 | |
/*
 * Validate an indirect extent: its value is a refcount followed by an
 * ordinary extent pointer list, so validity is just that of the pointers.
 */
int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}
261 | |
262 | void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, |
263 | struct bkey_s_c k) |
264 | { |
265 | struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k); |
266 | |
267 | prt_printf(out, "refcount: %llu " , le64_to_cpu(r.v->refcount)); |
268 | |
269 | bch2_bkey_ptrs_to_text(out, c, k); |
270 | } |
271 | |
272 | #if 0 |
273 | Currently disabled, needs to be debugged: |
274 | |
275 | bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) |
276 | { |
277 | struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); |
278 | struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); |
279 | |
280 | return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); |
281 | } |
282 | #endif |
283 | |
284 | static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *flags) |
285 | { |
286 | if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(k: new)) { |
287 | new.k->type = KEY_TYPE_deleted; |
288 | new.k->size = 0; |
289 | set_bkey_val_u64s(k: new.k, val_u64s: 0); |
290 | *flags &= ~BTREE_TRIGGER_INSERT; |
291 | } |
292 | } |
293 | |
294 | int bch2_trigger_reflink_v(struct btree_trans *trans, |
295 | enum btree_id btree_id, unsigned level, |
296 | struct bkey_s_c old, struct bkey_s new, |
297 | unsigned flags) |
298 | { |
299 | if ((flags & BTREE_TRIGGER_TRANSACTIONAL) && |
300 | (flags & BTREE_TRIGGER_INSERT)) |
301 | check_indirect_extent_deleting(new, flags: &flags); |
302 | |
303 | return bch2_trigger_extent(trans, btree_id, level, old, new, flags); |
304 | } |
305 | |
306 | /* indirect inline data */ |
307 | |
/*
 * Indirect inline data has no internal structure to validate beyond what
 * generic bkey validation already checked: always valid.
 */
int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      struct printbuf *err)
{
	return 0;
}
314 | |
315 | void bch2_indirect_inline_data_to_text(struct printbuf *out, |
316 | struct bch_fs *c, struct bkey_s_c k) |
317 | { |
318 | struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k); |
319 | unsigned datalen = bkey_inline_data_bytes(k: k.k); |
320 | |
321 | prt_printf(out, "refcount %llu datalen %u: %*phN" , |
322 | le64_to_cpu(d.v->refcount), datalen, |
323 | min(datalen, 32U), d.v->data); |
324 | } |
325 | |
326 | int bch2_trigger_indirect_inline_data(struct btree_trans *trans, |
327 | enum btree_id btree_id, unsigned level, |
328 | struct bkey_s_c old, struct bkey_s new, |
329 | unsigned flags) |
330 | { |
331 | check_indirect_extent_deleting(new, flags: &flags); |
332 | |
333 | return 0; |
334 | } |
335 | |
336 | static int bch2_make_extent_indirect(struct btree_trans *trans, |
337 | struct btree_iter *extent_iter, |
338 | struct bkey_i *orig) |
339 | { |
340 | struct bch_fs *c = trans->c; |
341 | struct btree_iter reflink_iter = { NULL }; |
342 | struct bkey_s_c k; |
343 | struct bkey_i *r_v; |
344 | struct bkey_i_reflink_p *r_p; |
345 | __le64 *refcount; |
346 | int ret; |
347 | |
348 | if (orig->k.type == KEY_TYPE_inline_data) |
349 | bch2_check_set_feature(c, feat: BCH_FEATURE_reflink_inline_data); |
350 | |
351 | bch2_trans_iter_init(trans, iter: &reflink_iter, btree_id: BTREE_ID_reflink, POS_MAX, |
352 | flags: BTREE_ITER_INTENT); |
353 | k = bch2_btree_iter_peek_prev(&reflink_iter); |
354 | ret = bkey_err(k); |
355 | if (ret) |
356 | goto err; |
357 | |
358 | r_v = bch2_trans_kmalloc(trans, size: sizeof(__le64) + bkey_bytes(&orig->k)); |
359 | ret = PTR_ERR_OR_ZERO(ptr: r_v); |
360 | if (ret) |
361 | goto err; |
362 | |
363 | bkey_init(k: &r_v->k); |
364 | r_v->k.type = bkey_type_to_indirect(k: &orig->k); |
365 | r_v->k.p = reflink_iter.pos; |
366 | bch2_key_resize(k: &r_v->k, new_size: orig->k.size); |
367 | r_v->k.version = orig->k.version; |
368 | |
369 | set_bkey_val_bytes(k: &r_v->k, bytes: sizeof(__le64) + bkey_val_bytes(k: &orig->k)); |
370 | |
371 | refcount = bkey_refcount(k: bkey_i_to_s(k: r_v)); |
372 | *refcount = 0; |
373 | memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k)); |
374 | |
375 | ret = bch2_trans_update(trans, &reflink_iter, r_v, 0); |
376 | if (ret) |
377 | goto err; |
378 | |
379 | /* |
380 | * orig is in a bkey_buf which statically allocates 5 64s for the val, |
381 | * so we know it will be big enough: |
382 | */ |
383 | orig->k.type = KEY_TYPE_reflink_p; |
384 | r_p = bkey_i_to_reflink_p(k: orig); |
385 | set_bkey_val_bytes(k: &r_p->k, bytes: sizeof(r_p->v)); |
386 | |
387 | /* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */ |
388 | #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) |
389 | __underlying_memset(&r_p->v, 0, sizeof(r_p->v)); |
390 | #else |
391 | memset(&r_p->v, 0, sizeof(r_p->v)); |
392 | #endif |
393 | |
394 | r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k)); |
395 | |
396 | ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, |
397 | BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE); |
398 | err: |
399 | bch2_trans_iter_exit(trans, &reflink_iter); |
400 | |
401 | return ret; |
402 | } |
403 | |
404 | static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) |
405 | { |
406 | struct bkey_s_c k; |
407 | int ret; |
408 | |
409 | for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) { |
410 | if (bkey_extent_is_unwritten(k)) |
411 | continue; |
412 | |
413 | if (bkey_extent_is_data(k: k.k)) |
414 | return k; |
415 | } |
416 | |
417 | if (bkey_ge(l: iter->pos, r: end)) |
418 | bch2_btree_iter_set_pos(iter, new_pos: end); |
419 | return ret ? bkey_s_c_err(ret) : bkey_s_c_null; |
420 | } |
421 | |
422 | s64 bch2_remap_range(struct bch_fs *c, |
423 | subvol_inum dst_inum, u64 dst_offset, |
424 | subvol_inum src_inum, u64 src_offset, |
425 | u64 remap_sectors, |
426 | u64 new_i_size, s64 *i_sectors_delta) |
427 | { |
428 | struct btree_trans *trans; |
429 | struct btree_iter dst_iter, src_iter; |
430 | struct bkey_s_c src_k; |
431 | struct bkey_buf new_dst, new_src; |
432 | struct bpos dst_start = POS(dst_inum.inum, dst_offset); |
433 | struct bpos src_start = POS(src_inum.inum, src_offset); |
434 | struct bpos dst_end = dst_start, src_end = src_start; |
435 | struct bch_io_opts opts; |
436 | struct bpos src_want; |
437 | u64 dst_done = 0; |
438 | u32 dst_snapshot, src_snapshot; |
439 | int ret = 0, ret2 = 0; |
440 | |
441 | if (!bch2_write_ref_tryget(c, ref: BCH_WRITE_REF_reflink)) |
442 | return -BCH_ERR_erofs_no_writes; |
443 | |
444 | bch2_check_set_feature(c, feat: BCH_FEATURE_reflink); |
445 | |
446 | dst_end.offset += remap_sectors; |
447 | src_end.offset += remap_sectors; |
448 | |
449 | bch2_bkey_buf_init(s: &new_dst); |
450 | bch2_bkey_buf_init(s: &new_src); |
451 | trans = bch2_trans_get(c); |
452 | |
453 | ret = bch2_inum_opts_get(trans, src_inum, &opts); |
454 | if (ret) |
455 | goto err; |
456 | |
457 | bch2_trans_iter_init(trans, iter: &src_iter, btree_id: BTREE_ID_extents, pos: src_start, |
458 | flags: BTREE_ITER_INTENT); |
459 | bch2_trans_iter_init(trans, iter: &dst_iter, btree_id: BTREE_ID_extents, pos: dst_start, |
460 | flags: BTREE_ITER_INTENT); |
461 | |
462 | while ((ret == 0 || |
463 | bch2_err_matches(ret, BCH_ERR_transaction_restart)) && |
464 | bkey_lt(l: dst_iter.pos, r: dst_end)) { |
465 | struct disk_reservation disk_res = { 0 }; |
466 | |
467 | bch2_trans_begin(trans); |
468 | |
469 | if (fatal_signal_pending(current)) { |
470 | ret = -EINTR; |
471 | break; |
472 | } |
473 | |
474 | ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol, |
475 | &src_snapshot); |
476 | if (ret) |
477 | continue; |
478 | |
479 | bch2_btree_iter_set_snapshot(iter: &src_iter, snapshot: src_snapshot); |
480 | |
481 | ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol, |
482 | &dst_snapshot); |
483 | if (ret) |
484 | continue; |
485 | |
486 | bch2_btree_iter_set_snapshot(iter: &dst_iter, snapshot: dst_snapshot); |
487 | |
488 | if (dst_inum.inum < src_inum.inum) { |
489 | /* Avoid some lock cycle transaction restarts */ |
490 | ret = bch2_btree_iter_traverse(&dst_iter); |
491 | if (ret) |
492 | continue; |
493 | } |
494 | |
495 | dst_done = dst_iter.pos.offset - dst_start.offset; |
496 | src_want = POS(src_start.inode, src_start.offset + dst_done); |
497 | bch2_btree_iter_set_pos(iter: &src_iter, new_pos: src_want); |
498 | |
499 | src_k = get_next_src(iter: &src_iter, end: src_end); |
500 | ret = bkey_err(src_k); |
501 | if (ret) |
502 | continue; |
503 | |
504 | if (bkey_lt(l: src_want, r: src_iter.pos)) { |
505 | ret = bch2_fpunch_at(trans, &dst_iter, dst_inum, |
506 | min(dst_end.offset, |
507 | dst_iter.pos.offset + |
508 | src_iter.pos.offset - src_want.offset), |
509 | i_sectors_delta); |
510 | continue; |
511 | } |
512 | |
513 | if (src_k.k->type != KEY_TYPE_reflink_p) { |
514 | bch2_btree_iter_set_pos_to_extent_start(iter: &src_iter); |
515 | |
516 | bch2_bkey_buf_reassemble(s: &new_src, c, k: src_k); |
517 | src_k = bkey_i_to_s_c(k: new_src.k); |
518 | |
519 | ret = bch2_make_extent_indirect(trans, extent_iter: &src_iter, |
520 | orig: new_src.k); |
521 | if (ret) |
522 | continue; |
523 | |
524 | BUG_ON(src_k.k->type != KEY_TYPE_reflink_p); |
525 | } |
526 | |
527 | if (src_k.k->type == KEY_TYPE_reflink_p) { |
528 | struct bkey_s_c_reflink_p src_p = |
529 | bkey_s_c_to_reflink_p(k: src_k); |
530 | struct bkey_i_reflink_p *dst_p = |
531 | bkey_reflink_p_init(k: new_dst.k); |
532 | |
533 | u64 offset = le64_to_cpu(src_p.v->idx) + |
534 | (src_want.offset - |
535 | bkey_start_offset(k: src_k.k)); |
536 | |
537 | dst_p->v.idx = cpu_to_le64(offset); |
538 | } else { |
539 | BUG(); |
540 | } |
541 | |
542 | new_dst.k->k.p = dst_iter.pos; |
543 | bch2_key_resize(k: &new_dst.k->k, |
544 | min(src_k.k->p.offset - src_want.offset, |
545 | dst_end.offset - dst_iter.pos.offset)); |
546 | |
547 | ret = bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?: |
548 | bch2_extent_update(trans, dst_inum, &dst_iter, |
549 | new_dst.k, &disk_res, |
550 | new_i_size, i_sectors_delta, |
551 | true); |
552 | bch2_disk_reservation_put(c, res: &disk_res); |
553 | } |
554 | bch2_trans_iter_exit(trans, &dst_iter); |
555 | bch2_trans_iter_exit(trans, &src_iter); |
556 | |
557 | BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end)); |
558 | BUG_ON(bkey_gt(dst_iter.pos, dst_end)); |
559 | |
560 | dst_done = dst_iter.pos.offset - dst_start.offset; |
561 | new_i_size = min(dst_iter.pos.offset << 9, new_i_size); |
562 | |
563 | do { |
564 | struct bch_inode_unpacked inode_u; |
565 | struct btree_iter inode_iter = { NULL }; |
566 | |
567 | bch2_trans_begin(trans); |
568 | |
569 | ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u, |
570 | dst_inum, BTREE_ITER_INTENT); |
571 | |
572 | if (!ret2 && |
573 | inode_u.bi_size < new_i_size) { |
574 | inode_u.bi_size = new_i_size; |
575 | ret2 = bch2_inode_write(trans, iter: &inode_iter, inode: &inode_u) ?: |
576 | bch2_trans_commit(trans, NULL, NULL, |
577 | flags: BCH_TRANS_COMMIT_no_enospc); |
578 | } |
579 | |
580 | bch2_trans_iter_exit(trans, &inode_iter); |
581 | } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart)); |
582 | err: |
583 | bch2_trans_put(trans); |
584 | bch2_bkey_buf_exit(s: &new_src, c); |
585 | bch2_bkey_buf_exit(s: &new_dst, c); |
586 | |
587 | bch2_write_ref_put(c, ref: BCH_WRITE_REF_reflink); |
588 | |
589 | return dst_done ?: ret ?: ret2; |
590 | } |
591 | |