// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "messages.h"
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

/*
 * Freshly initialize a ulist.
 *
 * @ulist: the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/*
 * Free up additionally allocated memory for the ulist.
 *
 * @ulist: the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}
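
/*
 * For illustration only (not used in this file): ulist_init() and
 * ulist_release() pair up nicely when the base 'struct ulist' lives on the
 * stack, avoiding a separate allocation for the head. 'bytenr' below is a
 * stand-in for whatever value the caller tracks:
 *
 *	struct ulist ulist;
 *
 *	ulist_init(&ulist);
 *	if (ulist_add(&ulist, bytenr, 0, GFP_NOFS) < 0)
 *		goto out;
 *	... walk the ulist ...
 * out:
 *	ulist_release(&ulist);
 */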

/*
 * Prepare a ulist for reuse.
 *
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/*
 * Dynamically allocate a ulist.
 *
 * @gfp_mask: allocation flags to use for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/*
 * Free dynamically allocated ulist.
 *
 * @ulist: ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/*
 * Add an element to the ulist.
 *
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks, write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

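/*
 * Add an element to the ulist, reporting an existing entry's aux value.
 *
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @old_aux: if non-NULL and @val already exists, the aux value currently
 *           stored for @val is written here
 * @gfp_mask: flags to use for allocation
 *
 * Otherwise the same semantics as ulist_add: returns 1 if @val was inserted,
 * 0 if it already existed (in which case @aux is ignored and the stored aux
 * is left untouched) and -ENOMEM on allocation failure.
 */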
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}
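
/*
 * Illustrative sketch (not part of this file): a caller that stores a bit
 * mask in aux can detect a duplicate and combine flags itself. 'SEEN_FLAG'
 * is a hypothetical example value:
 *
 *	u64 old = 0;
 *	int ret;
 *
 *	ret = ulist_add_merge(ulist, val, SEEN_FLAG, &old, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		... entry existed, its aux is in 'old'; the stored aux was
 *		    not updated, so merge 'old' with SEEN_FLAG as needed ...
 */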

/*
 * Delete one node from ulist.
 *
 * @ulist: ulist to remove node from
 * @val: value to delete
 * @aux: aux to delete
 *
 * The deletion will only be done when *BOTH* @val and @aux match.
 * Return 0 for successful delete.
 * Return 1 if not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found, now delete */
	ulist_rbtree_erase(ulist, node);
	return 0;
}
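
/*
 * E.g. (a sketch with made-up values): an entry added as (42, 1) survives a
 * delete attempt with a mismatched aux:
 *
 *	ulist_add(ulist, 42, 1, GFP_NOFS);
 *	ulist_del(ulist, 42, 0);	returns 1, entry kept
 *	ulist_del(ulist, 42, 1);	returns 0, entry removed
 */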

/*
 * Iterate ulist.
 *
 * @ulist: ulist to iterate
 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks, only
 * read locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}

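/*
 * Concrete iteration pattern (a sketch; 'handle' stands in for whatever the
 * caller does with each element):
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ulist, &uiter)))
 *		handle(node->val, node->aux);
 */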