/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SNAPSHOT_H
#define _BCACHEFS_SNAPSHOT_H

enum bkey_invalid_flags;

void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
			       enum bkey_invalid_flags, struct printbuf *);

#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) {	\
	.key_invalid	= bch2_snapshot_tree_invalid,		\
	.val_to_text	= bch2_snapshot_tree_to_text,		\
	.min_val_size	= 8,					\
})

struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);

int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);

void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bkey_invalid_flags, struct printbuf *);
int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
		       struct bkey_s_c, struct bkey_s, unsigned);

#define bch2_bkey_ops_snapshot ((struct bkey_ops) {		\
	.key_invalid	= bch2_snapshot_invalid,		\
	.val_to_text	= bch2_snapshot_to_text,		\
	.trigger	= bch2_mark_snapshot,			\
	.min_val_size	= 24,					\
})

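/*
 * Snapshot IDs count down from U32_MAX, so the in-memory snapshot table is
 * indexed from the top; returns NULL if @id isn't in the table:
 */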
static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
{
	u32 idx = U32_MAX - id;

	return likely(t && idx < t->nr)
		? &t->s[idx]
		: NULL;
}

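/* RCU-protected table lookup; caller must hold rcu_read_lock(): */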
static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
{
	return __snapshot_t(rcu_dereference(c->snapshots), id);
}

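/* Return the snapshot tree ID that @id belongs to, or 0 if @id is unknown: */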
static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *s = snapshot_t(c, id);
	id = s ? s->tree : 0;
	rcu_read_unlock();

	return id;
}

static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->parent : 0;
}

static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_parent_early(c, id);
	rcu_read_unlock();

	return id;
}

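/*
 * Like __bch2_snapshot_parent_early(), but in debug builds also verifies that
 * a child's depth is exactly its parent's depth plus one:
 */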
static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	if (!s)
		return 0;

	u32 parent = s->parent;
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    parent &&
	    s->depth != snapshot_t(c, parent)->depth + 1)
		panic("id %u depth=%u parent %u depth=%u\n",
		      id, snapshot_t(c, id)->depth,
		      parent, snapshot_t(c, parent)->depth);

	return parent;
}

static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_parent(c, id);
	rcu_read_unlock();

	return id;
}

static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
{
	rcu_read_lock();
	while (n--)
		id = __bch2_snapshot_parent(c, id);
	rcu_read_unlock();

	return id;
}

u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);

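/* Walk parent pointers up to the root of @id's snapshot tree: */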
static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
{
	u32 parent;

	rcu_read_lock();
	while ((parent = __bch2_snapshot_parent(c, id)))
		id = parent;
	rcu_read_unlock();

	return id;
}

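/*
 * Snapshot IDs that see exactly the same keys are grouped into an equivalence
 * class; return the class representative for @id, or 0 if @id is unknown:
 */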
static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->equiv : 0;
}

static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_equiv(c, id);
	rcu_read_unlock();

	return id;
}

static inline bool bch2_snapshot_is_equiv(struct bch_fs *c, u32 id)
{
	return id == bch2_snapshot_equiv(c, id);
}

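/*
 * Returns a positive value (the first child ID) if @id is an interior node,
 * 0 if it is a leaf, or -BCH_ERR_invalid_snapshot_node if @id doesn't exist:
 */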
static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *s = snapshot_t(c, id);
	int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
	rcu_read_unlock();

	return ret;
}

static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
{
	int ret = bch2_snapshot_is_internal_node(c, id);
	if (ret < 0)
		return ret;
	return !ret;
}

static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
{
	u32 depth;

	rcu_read_lock();
	depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
	rcu_read_unlock();

	return depth;
}

bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);

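/*
 * Every snapshot ID is considered an ancestor of itself; anything else defers
 * to the out-of-line tree walk:
 */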
static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	return id == ancestor
		? true
		: __bch2_snapshot_is_ancestor(c, id, ancestor);
}

static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *t;
	bool ret;

	rcu_read_lock();
	t = snapshot_t(c, id);
	ret = (t->children[0]|t->children[1]) != 0;
	rcu_read_unlock();

	return ret;
}

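/* snapshot_id_list is a darray of u32 snapshot IDs; lookups are linear scans: */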
static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)
{
	darray_for_each(*s, i)
		if (*i == id)
			return true;
	return false;
}

static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	darray_for_each(*s, i)
		if (bch2_snapshot_is_ancestor(c, id, *i))
			return true;
	return false;
}

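/* Add @id to the list; the caller must ensure it isn't already present: */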
static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	BUG_ON(snapshot_list_has_id(s, id));
	int ret = darray_push(s, id);
	if (ret)
		bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
	return ret;
}

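/* Add @id to the list unless it is already present: */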
static inline int snapshot_list_add_nodup(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	int ret = snapshot_list_has_id(s, id)
		? 0
		: darray_push(s, id);
	if (ret)
		bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
	return ret;
}

static inline int snapshot_list_merge(struct bch_fs *c, snapshot_id_list *dst, snapshot_id_list *src)
{
	darray_for_each(*src, i) {
		int ret = snapshot_list_add_nodup(c, dst, *i);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s);
int bch2_snapshot_get_subvol(struct btree_trans *, u32,
			     struct bch_subvolume *);

/* only exported for tests: */
int bch2_snapshot_node_create(struct btree_trans *, u32,
			      u32 *, u32 *, unsigned);

int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
int bch2_reconstruct_snapshots(struct bch_fs *);

int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);

int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);

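/*
 * Cheap inline check: if the btree doesn't use snapshots, or @pos.snapshot is
 * a leaf (so no descendant snapshot can have overwritten the key), there is
 * nothing to do; otherwise do the real check out of line:
 */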
static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
						   enum btree_id id,
						   struct bpos pos)
{
	if (!btree_type_has_snapshots(id) ||
	    bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
		return 0;

	return __bch2_key_has_snapshot_overwrites(trans, id, pos);
}

int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *, enum btree_id,
					  struct bkey_s_c, struct bpos *);

int bch2_snapshots_read(struct bch_fs *);
void bch2_fs_snapshots_exit(struct bch_fs *);

#endif /* _BCACHEFS_SNAPSHOT_H */