// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

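/*
 * Sort order for the transaction's list of pending updates: by btree, then
 * cached vs. uncached, then level (descending), then key position.
 */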
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return cmp_int(l->btree_id, r->btree_id) ?:
	       cmp_int(l->cached, r->cached) ?:
	       -cmp_int(l->level, r->level) ?:
	       bpos_cmp(l->k->k.p, r->k->k.p);
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_update_flags,
			  unsigned long ip);

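/*
 * Try to merge the key at the start of the range we're inserting into the
 * front of @insert: on success the existing key is deleted and *insert is
 * replaced with the merged key.
 */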
static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_update_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}

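/*
 * Try to merge the key following @insert into the back of it: on success the
 * merge is done in place on @insert.
 */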
static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot)
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
				      enum btree_id btree_id, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = pos.snapshot;
	int ret;

	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
		return 0;

	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
				     BTREE_ITER_ALL_SNAPSHOTS|
				     BTREE_ITER_NOPRESERVE, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
					      k.k->p.snapshot)) {
			ret = !bkey_whiteout(k.k);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

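/*
 * For use when splitting extents in existing snapshots: for every descendent
 * snapshot in which the key at @old_pos has been overwritten, emit a whiteout
 * at @new_pos so the new split fragment doesn't become visible there.
 */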
int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_NOT_EXTENTS|
					   BTREE_ITER_INTENT);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p = whiteout_pos;
			update->k.type = KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		}
		bch2_trans_iter_exit(trans, &new_iter);

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}

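/*
 * Emit the updates needed when @new overwrites some portion of @old: keys for
 * the unoverwritten front and back portions of @old, and a whiteout or
 * deletion for the overwritten range when @old doesn't extend past @new.
 */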
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_update_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	unsigned back_split = bkey_gt(old.k->p, new.k->p);
	unsigned middle_split = (front_split || back_split) &&
		old.k->p.snapshot != new.k->p.snapshot;
	unsigned nr_splits = front_split + back_split + middle_split;
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (nr_splits > 1 &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
						     old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
						    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (middle_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
						     old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
						    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
						  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
						flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}

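/*
 * Extent updates are turned into a sequence of non-extent operations: walk
 * the existing keys that overlap @insert, splitting and whiting them out as
 * needed, attempt front/back merges, then insert whatever remains of @insert
 * as a regular (non-extent) key.
 */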
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_update_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_NOT_EXTENTS);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

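/*
 * The key cache requires that any cached key also exist in the btree (see
 * bch2_trans_update_by_path()): when creating a key that doesn't yet exist
 * in the btree, write it directly to the btree instead, flagging the cached
 * update as already flushed.
 */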
static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_update_flags flags,
					    unsigned long ip)
{
	struct bkey k;
	int ret;

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_INTENT, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

	struct btree_path *btree_path = trans->paths + path_idx;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_NORUN;

	btree_path_set_should_be_locked(btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
	bch2_path_put(trans, path_idx, true);
	return ret;
}

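/*
 * Add an update to the transaction's sorted list of pending updates, or
 * replace an existing update for the same position.
 */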
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_update_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	int cmp;

	struct btree_path *path = trans->paths + path_idx;
	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= trans->nr_paths);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path_idx,
		.k		= k,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	if (i < trans->updates + trans->nr_updates && !cmp) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(trans->paths + i->path, true);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && !i->old_btree_u64s)
		return flush_new_cached_update(trans, i, flags, ip);

	return 0;
}

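/*
 * Ensure the iterator has a traversed key cache path at the current position;
 * if the cached key is already dirty we raced, so restart the transaction.
 */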
static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
						    struct btree_iter *iter,
						    struct btree_path *path)
{
	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

	if (!key_cache_path ||
	    !key_cache_path->should_be_locked ||
	    !bpos_eq(key_cache_path->pos, iter->pos)) {
		struct bkey_cached *ck;
		int ret;

		if (!iter->key_cache_path)
			iter->key_cache_path =
				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
					      BTREE_ITER_INTENT|
					      BTREE_ITER_CACHED, _THIS_IP_);

		iter->key_cache_path =
			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
						iter->flags & BTREE_ITER_INTENT,
						_THIS_IP_);

		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
		if (unlikely(ret))
			return ret;

		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
		}

		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
	}

	return 0;
}

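/*
 * bch2_trans_update - queue an update within a transaction, to be applied by
 * bch2_trans_commit(). Extent updates are handled specially, and updates to
 * cached btrees are redirected to the key cache.
 *
 * Typical usage (a sketch; error handling and transaction restarts omitted):
 *
 *	ret = bch2_btree_iter_traverse(&iter) ?:
 *	      bch2_trans_update(trans, &iter, k, 0) ?:
 *	      bch2_trans_commit(trans, NULL, NULL, 0);
 */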
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_update_flags flags)
{
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	int ret;

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	struct btree_path *path = trans->paths + path_idx;
	if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		ret = bch2_trans_update_get_key_cache(trans, iter, path);
		if (ret)
			return ret;

		path_idx = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}

int bch2_btree_insert_clone_trans(struct btree_trans *trans,
				  enum btree_id btree,
				  struct bkey_i *k)
{
	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_copy(n, k);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}

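/*
 * Reserve space for @u64s worth of journal entries in the transaction's
 * journal entry buffer, growing the buffer (and copying the existing entries
 * over) as needed; returns a pointer to the newly reserved space.
 */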
struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	unsigned new_top = trans->journal_entries_u64s + u64s;
	unsigned old_size = trans->journal_entries_size;

	if (new_top > trans->journal_entries_size) {
		trans->journal_entries_size = roundup_pow_of_two(new_top);

		btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
	}

	struct jset_entry *n =
		bch2_trans_kmalloc_nomemzero(trans,
					     trans->journal_entries_size * sizeof(u64));
	if (IS_ERR(n))
		return ERR_CAST(n);

	if (trans->journal_entries)
		memcpy(n, trans->journal_entries, old_size * sizeof(u64));
	trans->journal_entries = n;

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s = new_top;
	return e;
}

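/*
 * Find the empty slot immediately after the last key in @btree, returning
 * -BCH_ERR_ENOSPC_btree_slot if that position is past @end. On success the
 * iterator is left pointing at the slot, and the caller must exit it.
 */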
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}

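/* Register a hook to be run at transaction commit time: */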
void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}

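/*
 * Insert @k at its exact position, with no extent splitting or merging,
 * regardless of whether @btree is an extents btree:
 */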
int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(&iter) ?:
	      bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(&iter) ?:
	      bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/**
 * bch2_btree_insert - insert a key into a btree, in its own transaction
 * @c:		pointer to struct bch_fs
 * @id:		btree to insert into
 * @k:		key to insert
 * @disk_res:	must be non-NULL whenever inserting or potentially
 *		splitting data extents
 * @flags:	transaction commit flags
 *
 * Returns:	0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
		      struct disk_reservation *disk_res, int flags)
{
	return bch2_trans_do(c, disk_res, NULL, flags,
			     bch2_btree_insert_trans(trans, id, k, 0));
}

int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
				unsigned len, unsigned update_flags)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = iter->pos;
	bch2_key_resize(&k->k, len);
	return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
}

int bch2_btree_delete(struct btree_trans *trans,
		      enum btree_id btree, struct bpos pos,
		      unsigned update_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(&iter) ?:
	      bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

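/*
 * Delete every key in the range [start, end), committing as we go: since this
 * can commit multiple times, a restart may be reported to an enclosing
 * transaction via trans_was_restarted().
 */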
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_IS_EXTENTS)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
		      bch2_trans_commit(trans, &disk_res, journal_seq,
					BCH_TRANS_COMMIT_no_enospc);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: trans_was_restarted(trans, restart_count);
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}

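/*
 * Set or clear a bit in a bitset btree: writes a KEY_TYPE_set key at @pos
 * when @set is true, and deletes the key there when it is false.
 */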
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	bkey_init(&k->k);
	k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k->k.p = pos;

	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(&iter) ?:
	      bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

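/* As above, but going through the btree write buffer: */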
int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
				struct bpos pos, bool set)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k.k.p = pos;

	return bch2_trans_update_buffered(trans, btree, &k);
}

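/*
 * Append the contents of @buf as a log entry to the transaction's journal
 * entries; @u64s is the size of the message rounded up to a whole number of
 * u64s.
 */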
static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
{
	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
	memcpy(l->d, buf->buf, buf->pos);
	return 0;
}

__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	struct printbuf buf = PRINTBUF;
	prt_vprintf(&buf, fmt, args);

	unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
	prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

	int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
		if (ret)
			goto err;

		struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
		journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
		memcpy(l->d, buf.buf, buf.pos);
		c->journal.early_journal_entries.nr += jset_u64s(u64s);
	} else {
		ret = bch2_trans_do(c, NULL, NULL,
				    BCH_TRANS_COMMIT_lazy_rw|commit_flags,
				    __bch2_trans_log_msg(trans, &buf, u64s));
	}
err:
	printbuf_exit(&buf);
	return ret;
}

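/* Log a message in the journal: */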
__printf(2, 3)
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}

/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking.
 */
__printf(2, 3)
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}