1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Maple Tree implementation |
4 | * Copyright (c) 2018-2022 Oracle Corporation |
5 | * Authors: Liam R. Howlett <Liam.Howlett@oracle.com> |
6 | * Matthew Wilcox <willy@infradead.org> |
7 | * Copyright (c) 2023 ByteDance |
8 | * Author: Peng Zhang <zhangpeng.00@bytedance.com> |
9 | */ |
10 | |
11 | /* |
12 | * DOC: Interesting implementation details of the Maple Tree |
13 | * |
14 | * Each node type has a number of slots for entries and a number of slots for |
15 | * pivots. In the case of dense nodes, the pivots are implied by the position |
16 | * and are simply the slot index + the minimum of the node. |
17 | * |
18 | * In regular B-Tree terms, pivots are called keys. The term pivot is used to |
19 | * indicate that the tree is specifying ranges. Pivots may appear in the |
20 | * subtree with an entry attached to the value whereas keys are unique to a |
21 | * specific position of a B-tree. Pivot values are inclusive of the slot with |
22 | * the same index. |
23 | * |
24 | * |
25 | * The following illustrates the layout of a range64 nodes slots and pivots. |
26 | * |
27 | * |
28 | * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 | |
29 | * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ |
30 | * │ │ │ │ │ │ │ │ └─ Implied maximum |
31 | * │ │ │ │ │ │ │ └─ Pivot 14 |
32 | * │ │ │ │ │ │ └─ Pivot 13 |
33 | * │ │ │ │ │ └─ Pivot 12 |
34 | * │ │ │ │ └─ Pivot 11 |
35 | * │ │ │ └─ Pivot 2 |
36 | * │ │ └─ Pivot 1 |
37 | * │ └─ Pivot 0 |
38 | * └─ Implied minimum |
39 | * |
40 | * Slot contents: |
41 | * Internal (non-leaf) nodes contain pointers to other nodes. |
42 | * Leaf nodes contain entries. |
43 | * |
44 | * The location of interest is often referred to as an offset. All offsets have |
45 | * a slot, but the last offset has an implied pivot from the node above (or |
46 | * UINT_MAX for the root node. |
47 | * |
48 | * Ranges complicate certain write activities. When modifying any of |
49 | * the B-tree variants, it is known that one entry will either be added or |
50 | * deleted. When modifying the Maple Tree, one store operation may overwrite |
51 | * the entire data set, or one half of the tree, or the middle half of the tree. |
52 | * |
53 | */ |
54 | |
55 | |
56 | #include <linux/maple_tree.h> |
57 | #include <linux/xarray.h> |
58 | #include <linux/types.h> |
59 | #include <linux/export.h> |
60 | #include <linux/slab.h> |
61 | #include <linux/limits.h> |
62 | #include <asm/barrier.h> |
63 | |
64 | #define CREATE_TRACE_POINTS |
65 | #include <trace/events/maple_tree.h> |
66 | |
67 | #define MA_ROOT_PARENT 1 |
68 | |
69 | /* |
70 | * Maple state flags |
71 | * * MA_STATE_BULK - Bulk insert mode |
72 | * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert |
73 | * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation |
74 | */ |
75 | #define MA_STATE_BULK 1 |
76 | #define MA_STATE_REBALANCE 2 |
77 | #define MA_STATE_PREALLOC 4 |
78 | |
79 | #define ma_parent_ptr(x) ((struct maple_pnode *)(x)) |
80 | #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT) |
81 | #define ma_mnode_ptr(x) ((struct maple_node *)(x)) |
82 | #define ma_enode_ptr(x) ((struct maple_enode *)(x)) |
83 | static struct kmem_cache *maple_node_cache; |
84 | |
85 | #ifdef CONFIG_DEBUG_MAPLE_TREE |
86 | static const unsigned long mt_max[] = { |
87 | [maple_dense] = MAPLE_NODE_SLOTS, |
88 | [maple_leaf_64] = ULONG_MAX, |
89 | [maple_range_64] = ULONG_MAX, |
90 | [maple_arange_64] = ULONG_MAX, |
91 | }; |
92 | #define mt_node_max(x) mt_max[mte_node_type(x)] |
93 | #endif |
94 | |
95 | static const unsigned char mt_slots[] = { |
96 | [maple_dense] = MAPLE_NODE_SLOTS, |
97 | [maple_leaf_64] = MAPLE_RANGE64_SLOTS, |
98 | [maple_range_64] = MAPLE_RANGE64_SLOTS, |
99 | [maple_arange_64] = MAPLE_ARANGE64_SLOTS, |
100 | }; |
101 | #define mt_slot_count(x) mt_slots[mte_node_type(x)] |
102 | |
103 | static const unsigned char mt_pivots[] = { |
104 | [maple_dense] = 0, |
105 | [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1, |
106 | [maple_range_64] = MAPLE_RANGE64_SLOTS - 1, |
107 | [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1, |
108 | }; |
109 | #define mt_pivot_count(x) mt_pivots[mte_node_type(x)] |
110 | |
111 | static const unsigned char mt_min_slots[] = { |
112 | [maple_dense] = MAPLE_NODE_SLOTS / 2, |
113 | [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, |
114 | [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, |
115 | [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1, |
116 | }; |
117 | #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)] |
118 | |
119 | #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2) |
120 | #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1) |
121 | |
122 | struct maple_big_node { |
123 | struct maple_pnode *parent; |
124 | unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1]; |
125 | union { |
126 | struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS]; |
127 | struct { |
128 | unsigned long padding[MAPLE_BIG_NODE_GAPS]; |
129 | unsigned long gap[MAPLE_BIG_NODE_GAPS]; |
130 | }; |
131 | }; |
132 | unsigned char b_end; |
133 | enum maple_type type; |
134 | }; |
135 | |
136 | /* |
137 | * The maple_subtree_state is used to build a tree to replace a segment of an |
138 | * existing tree in a more atomic way. Any walkers of the older tree will hit a |
139 | * dead node and restart on updates. |
140 | */ |
141 | struct maple_subtree_state { |
142 | struct ma_state *orig_l; /* Original left side of subtree */ |
143 | struct ma_state *orig_r; /* Original right side of subtree */ |
144 | struct ma_state *l; /* New left side of subtree */ |
145 | struct ma_state *m; /* New middle of subtree (rare) */ |
146 | struct ma_state *r; /* New right side of subtree */ |
147 | struct ma_topiary *free; /* nodes to be freed */ |
148 | struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */ |
149 | struct maple_big_node *bn; |
150 | }; |
151 | |
152 | #ifdef CONFIG_KASAN_STACK |
153 | /* Prevent mas_wr_bnode() from exceeding the stack frame limit */ |
154 | #define noinline_for_kasan noinline_for_stack |
155 | #else |
156 | #define noinline_for_kasan inline |
157 | #endif |
158 | |
159 | /* Functions */ |
160 | static inline struct maple_node *mt_alloc_one(gfp_t gfp) |
161 | { |
162 | return kmem_cache_alloc(cachep: maple_node_cache, flags: gfp); |
163 | } |
164 | |
165 | static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes) |
166 | { |
167 | return kmem_cache_alloc_bulk(s: maple_node_cache, flags: gfp, size, p: nodes); |
168 | } |
169 | |
170 | static inline void mt_free_one(struct maple_node *node) |
171 | { |
172 | kmem_cache_free(s: maple_node_cache, objp: node); |
173 | } |
174 | |
175 | static inline void mt_free_bulk(size_t size, void __rcu **nodes) |
176 | { |
177 | kmem_cache_free_bulk(s: maple_node_cache, size, p: (void **)nodes); |
178 | } |
179 | |
180 | static void mt_free_rcu(struct rcu_head *head) |
181 | { |
182 | struct maple_node *node = container_of(head, struct maple_node, rcu); |
183 | |
184 | kmem_cache_free(s: maple_node_cache, objp: node); |
185 | } |
186 | |
187 | /* |
188 | * ma_free_rcu() - Use rcu callback to free a maple node |
189 | * @node: The node to free |
190 | * |
191 | * The maple tree uses the parent pointer to indicate this node is no longer in |
192 | * use and will be freed. |
193 | */ |
194 | static void ma_free_rcu(struct maple_node *node) |
195 | { |
196 | WARN_ON(node->parent != ma_parent_ptr(node)); |
197 | call_rcu(head: &node->rcu, func: mt_free_rcu); |
198 | } |
199 | |
200 | static void mas_set_height(struct ma_state *mas) |
201 | { |
202 | unsigned int new_flags = mas->tree->ma_flags; |
203 | |
204 | new_flags &= ~MT_FLAGS_HEIGHT_MASK; |
205 | MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX); |
206 | new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; |
207 | mas->tree->ma_flags = new_flags; |
208 | } |
209 | |
210 | static unsigned int mas_mt_height(struct ma_state *mas) |
211 | { |
212 | return mt_height(mt: mas->tree); |
213 | } |
214 | |
215 | static inline unsigned int mt_attr(struct maple_tree *mt) |
216 | { |
217 | return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK; |
218 | } |
219 | |
220 | static __always_inline enum maple_type mte_node_type( |
221 | const struct maple_enode *entry) |
222 | { |
223 | return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) & |
224 | MAPLE_NODE_TYPE_MASK; |
225 | } |
226 | |
227 | static __always_inline bool ma_is_dense(const enum maple_type type) |
228 | { |
229 | return type < maple_leaf_64; |
230 | } |
231 | |
232 | static __always_inline bool ma_is_leaf(const enum maple_type type) |
233 | { |
234 | return type < maple_range_64; |
235 | } |
236 | |
237 | static __always_inline bool mte_is_leaf(const struct maple_enode *entry) |
238 | { |
239 | return ma_is_leaf(type: mte_node_type(entry)); |
240 | } |
241 | |
242 | /* |
243 | * We also reserve values with the bottom two bits set to '10' which are |
244 | * below 4096 |
245 | */ |
246 | static __always_inline bool mt_is_reserved(const void *entry) |
247 | { |
248 | return ((unsigned long)entry < MAPLE_RESERVED_RANGE) && |
249 | xa_is_internal(entry); |
250 | } |
251 | |
252 | static __always_inline void mas_set_err(struct ma_state *mas, long err) |
253 | { |
254 | mas->node = MA_ERROR(err); |
255 | mas->status = ma_error; |
256 | } |
257 | |
258 | static __always_inline bool mas_is_ptr(const struct ma_state *mas) |
259 | { |
260 | return mas->status == ma_root; |
261 | } |
262 | |
263 | static __always_inline bool mas_is_start(const struct ma_state *mas) |
264 | { |
265 | return mas->status == ma_start; |
266 | } |
267 | |
268 | static __always_inline bool mas_is_none(const struct ma_state *mas) |
269 | { |
270 | return mas->status == ma_none; |
271 | } |
272 | |
273 | static __always_inline bool mas_is_paused(const struct ma_state *mas) |
274 | { |
275 | return mas->status == ma_pause; |
276 | } |
277 | |
278 | static __always_inline bool mas_is_overflow(struct ma_state *mas) |
279 | { |
280 | return mas->status == ma_overflow; |
281 | } |
282 | |
283 | static inline bool mas_is_underflow(struct ma_state *mas) |
284 | { |
285 | return mas->status == ma_underflow; |
286 | } |
287 | |
288 | static __always_inline struct maple_node *mte_to_node( |
289 | const struct maple_enode *entry) |
290 | { |
291 | return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK); |
292 | } |
293 | |
294 | /* |
295 | * mte_to_mat() - Convert a maple encoded node to a maple topiary node. |
296 | * @entry: The maple encoded node |
297 | * |
298 | * Return: a maple topiary pointer |
299 | */ |
300 | static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry) |
301 | { |
302 | return (struct maple_topiary *) |
303 | ((unsigned long)entry & ~MAPLE_NODE_MASK); |
304 | } |
305 | |
306 | /* |
307 | * mas_mn() - Get the maple state node. |
308 | * @mas: The maple state |
309 | * |
310 | * Return: the maple node (not encoded - bare pointer). |
311 | */ |
312 | static inline struct maple_node *mas_mn(const struct ma_state *mas) |
313 | { |
314 | return mte_to_node(entry: mas->node); |
315 | } |
316 | |
317 | /* |
318 | * mte_set_node_dead() - Set a maple encoded node as dead. |
319 | * @mn: The maple encoded node. |
320 | */ |
321 | static inline void mte_set_node_dead(struct maple_enode *mn) |
322 | { |
323 | mte_to_node(entry: mn)->parent = ma_parent_ptr(mte_to_node(mn)); |
324 | smp_wmb(); /* Needed for RCU */ |
325 | } |
326 | |
327 | /* Bit 1 indicates the root is a node */ |
328 | #define MAPLE_ROOT_NODE 0x02 |
329 | /* maple_type stored bit 3-6 */ |
330 | #define MAPLE_ENODE_TYPE_SHIFT 0x03 |
331 | /* Bit 2 means a NULL somewhere below */ |
332 | #define MAPLE_ENODE_NULL 0x04 |
333 | |
334 | static inline struct maple_enode *mt_mk_node(const struct maple_node *node, |
335 | enum maple_type type) |
336 | { |
337 | return (void *)((unsigned long)node | |
338 | (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL); |
339 | } |
340 | |
341 | static inline void *mte_mk_root(const struct maple_enode *node) |
342 | { |
343 | return (void *)((unsigned long)node | MAPLE_ROOT_NODE); |
344 | } |
345 | |
346 | static inline void *mte_safe_root(const struct maple_enode *node) |
347 | { |
348 | return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE); |
349 | } |
350 | |
351 | static inline void *mte_set_full(const struct maple_enode *node) |
352 | { |
353 | return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL); |
354 | } |
355 | |
356 | static inline void *mte_clear_full(const struct maple_enode *node) |
357 | { |
358 | return (void *)((unsigned long)node | MAPLE_ENODE_NULL); |
359 | } |
360 | |
361 | static inline bool mte_has_null(const struct maple_enode *node) |
362 | { |
363 | return (unsigned long)node & MAPLE_ENODE_NULL; |
364 | } |
365 | |
366 | static __always_inline bool ma_is_root(struct maple_node *node) |
367 | { |
368 | return ((unsigned long)node->parent & MA_ROOT_PARENT); |
369 | } |
370 | |
371 | static __always_inline bool mte_is_root(const struct maple_enode *node) |
372 | { |
373 | return ma_is_root(node: mte_to_node(entry: node)); |
374 | } |
375 | |
376 | static inline bool mas_is_root_limits(const struct ma_state *mas) |
377 | { |
378 | return !mas->min && mas->max == ULONG_MAX; |
379 | } |
380 | |
381 | static __always_inline bool mt_is_alloc(struct maple_tree *mt) |
382 | { |
383 | return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE); |
384 | } |
385 | |
386 | /* |
387 | * The Parent Pointer |
388 | * Excluding root, the parent pointer is 256B aligned like all other tree nodes. |
389 | * When storing a 32 or 64 bit values, the offset can fit into 5 bits. The 16 |
390 | * bit values need an extra bit to store the offset. This extra bit comes from |
391 | * a reuse of the last bit in the node type. This is possible by using bit 1 to |
392 | * indicate if bit 2 is part of the type or the slot. |
393 | * |
394 | * Note types: |
395 | * 0x??1 = Root |
396 | * 0x?00 = 16 bit nodes |
397 | * 0x010 = 32 bit nodes |
398 | * 0x110 = 64 bit nodes |
399 | * |
400 | * Slot size and alignment |
401 | * 0b??1 : Root |
402 | * 0b?00 : 16 bit values, type in 0-1, slot in 2-7 |
403 | * 0b010 : 32 bit values, type in 0-2, slot in 3-7 |
404 | * 0b110 : 64 bit values, type in 0-2, slot in 3-7 |
405 | */ |
406 | |
407 | #define MAPLE_PARENT_ROOT 0x01 |
408 | |
409 | #define MAPLE_PARENT_SLOT_SHIFT 0x03 |
410 | #define MAPLE_PARENT_SLOT_MASK 0xF8 |
411 | |
412 | #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02 |
413 | #define MAPLE_PARENT_16B_SLOT_MASK 0xFC |
414 | |
415 | #define MAPLE_PARENT_RANGE64 0x06 |
416 | #define MAPLE_PARENT_RANGE32 0x04 |
417 | #define MAPLE_PARENT_NOT_RANGE16 0x02 |
418 | |
419 | /* |
420 | * mte_parent_shift() - Get the parent shift for the slot storage. |
421 | * @parent: The parent pointer cast as an unsigned long |
422 | * Return: The shift into that pointer to the star to of the slot |
423 | */ |
424 | static inline unsigned long mte_parent_shift(unsigned long parent) |
425 | { |
426 | /* Note bit 1 == 0 means 16B */ |
427 | if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) |
428 | return MAPLE_PARENT_SLOT_SHIFT; |
429 | |
430 | return MAPLE_PARENT_16B_SLOT_SHIFT; |
431 | } |
432 | |
433 | /* |
434 | * mte_parent_slot_mask() - Get the slot mask for the parent. |
435 | * @parent: The parent pointer cast as an unsigned long. |
436 | * Return: The slot mask for that parent. |
437 | */ |
438 | static inline unsigned long mte_parent_slot_mask(unsigned long parent) |
439 | { |
440 | /* Note bit 1 == 0 means 16B */ |
441 | if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) |
442 | return MAPLE_PARENT_SLOT_MASK; |
443 | |
444 | return MAPLE_PARENT_16B_SLOT_MASK; |
445 | } |
446 | |
447 | /* |
448 | * mas_parent_type() - Return the maple_type of the parent from the stored |
449 | * parent type. |
450 | * @mas: The maple state |
451 | * @enode: The maple_enode to extract the parent's enum |
452 | * Return: The node->parent maple_type |
453 | */ |
454 | static inline |
455 | enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode) |
456 | { |
457 | unsigned long p_type; |
458 | |
459 | p_type = (unsigned long)mte_to_node(entry: enode)->parent; |
460 | if (WARN_ON(p_type & MAPLE_PARENT_ROOT)) |
461 | return 0; |
462 | |
463 | p_type &= MAPLE_NODE_MASK; |
464 | p_type &= ~mte_parent_slot_mask(parent: p_type); |
465 | switch (p_type) { |
466 | case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ |
467 | if (mt_is_alloc(mt: mas->tree)) |
468 | return maple_arange_64; |
469 | return maple_range_64; |
470 | } |
471 | |
472 | return 0; |
473 | } |
474 | |
475 | /* |
476 | * mas_set_parent() - Set the parent node and encode the slot |
477 | * @enode: The encoded maple node. |
478 | * @parent: The encoded maple node that is the parent of @enode. |
479 | * @slot: The slot that @enode resides in @parent. |
480 | * |
481 | * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the |
482 | * parent type. |
483 | */ |
484 | static inline |
485 | void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, |
486 | const struct maple_enode *parent, unsigned char slot) |
487 | { |
488 | unsigned long val = (unsigned long)parent; |
489 | unsigned long shift; |
490 | unsigned long type; |
491 | enum maple_type p_type = mte_node_type(entry: parent); |
492 | |
493 | MAS_BUG_ON(mas, p_type == maple_dense); |
494 | MAS_BUG_ON(mas, p_type == maple_leaf_64); |
495 | |
496 | switch (p_type) { |
497 | case maple_range_64: |
498 | case maple_arange_64: |
499 | shift = MAPLE_PARENT_SLOT_SHIFT; |
500 | type = MAPLE_PARENT_RANGE64; |
501 | break; |
502 | default: |
503 | case maple_dense: |
504 | case maple_leaf_64: |
505 | shift = type = 0; |
506 | break; |
507 | } |
508 | |
509 | val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */ |
510 | val |= (slot << shift) | type; |
511 | mte_to_node(entry: enode)->parent = ma_parent_ptr(val); |
512 | } |
513 | |
514 | /* |
515 | * mte_parent_slot() - get the parent slot of @enode. |
516 | * @enode: The encoded maple node. |
517 | * |
518 | * Return: The slot in the parent node where @enode resides. |
519 | */ |
520 | static __always_inline |
521 | unsigned int mte_parent_slot(const struct maple_enode *enode) |
522 | { |
523 | unsigned long val = (unsigned long)mte_to_node(entry: enode)->parent; |
524 | |
525 | if (unlikely(val & MA_ROOT_PARENT)) |
526 | return 0; |
527 | |
528 | /* |
529 | * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost |
530 | * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT |
531 | */ |
532 | return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(parent: val); |
533 | } |
534 | |
535 | /* |
536 | * mte_parent() - Get the parent of @node. |
537 | * @node: The encoded maple node. |
538 | * |
539 | * Return: The parent maple node. |
540 | */ |
541 | static __always_inline |
542 | struct maple_node *mte_parent(const struct maple_enode *enode) |
543 | { |
544 | return (void *)((unsigned long) |
545 | (mte_to_node(entry: enode)->parent) & ~MAPLE_NODE_MASK); |
546 | } |
547 | |
548 | /* |
549 | * ma_dead_node() - check if the @enode is dead. |
550 | * @enode: The encoded maple node |
551 | * |
552 | * Return: true if dead, false otherwise. |
553 | */ |
554 | static __always_inline bool ma_dead_node(const struct maple_node *node) |
555 | { |
556 | struct maple_node *parent; |
557 | |
558 | /* Do not reorder reads from the node prior to the parent check */ |
559 | smp_rmb(); |
560 | parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK); |
561 | return (parent == node); |
562 | } |
563 | |
564 | /* |
565 | * mte_dead_node() - check if the @enode is dead. |
566 | * @enode: The encoded maple node |
567 | * |
568 | * Return: true if dead, false otherwise. |
569 | */ |
570 | static __always_inline bool mte_dead_node(const struct maple_enode *enode) |
571 | { |
572 | struct maple_node *parent, *node; |
573 | |
574 | node = mte_to_node(entry: enode); |
575 | /* Do not reorder reads from the node prior to the parent check */ |
576 | smp_rmb(); |
577 | parent = mte_parent(enode); |
578 | return (parent == node); |
579 | } |
580 | |
581 | /* |
582 | * mas_allocated() - Get the number of nodes allocated in a maple state. |
583 | * @mas: The maple state |
584 | * |
585 | * The ma_state alloc member is overloaded to hold a pointer to the first |
586 | * allocated node or to the number of requested nodes to allocate. If bit 0 is |
587 | * set, then the alloc contains the number of requested nodes. If there is an |
588 | * allocated node, then the total allocated nodes is in that node. |
589 | * |
590 | * Return: The total number of nodes allocated |
591 | */ |
592 | static inline unsigned long mas_allocated(const struct ma_state *mas) |
593 | { |
594 | if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) |
595 | return 0; |
596 | |
597 | return mas->alloc->total; |
598 | } |
599 | |
600 | /* |
601 | * mas_set_alloc_req() - Set the requested number of allocations. |
602 | * @mas: the maple state |
603 | * @count: the number of allocations. |
604 | * |
605 | * The requested number of allocations is either in the first allocated node, |
606 | * located in @mas->alloc->request_count, or directly in @mas->alloc if there is |
607 | * no allocated node. Set the request either in the node or do the necessary |
608 | * encoding to store in @mas->alloc directly. |
609 | */ |
610 | static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count) |
611 | { |
612 | if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) { |
613 | if (!count) |
614 | mas->alloc = NULL; |
615 | else |
616 | mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); |
617 | return; |
618 | } |
619 | |
620 | mas->alloc->request_count = count; |
621 | } |
622 | |
623 | /* |
624 | * mas_alloc_req() - get the requested number of allocations. |
625 | * @mas: The maple state |
626 | * |
627 | * The alloc count is either stored directly in @mas, or in |
628 | * @mas->alloc->request_count if there is at least one node allocated. Decode |
629 | * the request count if it's stored directly in @mas->alloc. |
630 | * |
631 | * Return: The allocation request count. |
632 | */ |
633 | static inline unsigned int mas_alloc_req(const struct ma_state *mas) |
634 | { |
635 | if ((unsigned long)mas->alloc & 0x1) |
636 | return (unsigned long)(mas->alloc) >> 1; |
637 | else if (mas->alloc) |
638 | return mas->alloc->request_count; |
639 | return 0; |
640 | } |
641 | |
642 | /* |
643 | * ma_pivots() - Get a pointer to the maple node pivots. |
644 | * @node - the maple node |
645 | * @type - the node type |
646 | * |
647 | * In the event of a dead node, this array may be %NULL |
648 | * |
649 | * Return: A pointer to the maple node pivots |
650 | */ |
651 | static inline unsigned long *ma_pivots(struct maple_node *node, |
652 | enum maple_type type) |
653 | { |
654 | switch (type) { |
655 | case maple_arange_64: |
656 | return node->ma64.pivot; |
657 | case maple_range_64: |
658 | case maple_leaf_64: |
659 | return node->mr64.pivot; |
660 | case maple_dense: |
661 | return NULL; |
662 | } |
663 | return NULL; |
664 | } |
665 | |
666 | /* |
667 | * ma_gaps() - Get a pointer to the maple node gaps. |
668 | * @node - the maple node |
669 | * @type - the node type |
670 | * |
671 | * Return: A pointer to the maple node gaps |
672 | */ |
673 | static inline unsigned long *ma_gaps(struct maple_node *node, |
674 | enum maple_type type) |
675 | { |
676 | switch (type) { |
677 | case maple_arange_64: |
678 | return node->ma64.gap; |
679 | case maple_range_64: |
680 | case maple_leaf_64: |
681 | case maple_dense: |
682 | return NULL; |
683 | } |
684 | return NULL; |
685 | } |
686 | |
687 | /* |
688 | * mas_safe_pivot() - get the pivot at @piv or mas->max. |
689 | * @mas: The maple state |
690 | * @pivots: The pointer to the maple node pivots |
691 | * @piv: The pivot to fetch |
692 | * @type: The maple node type |
693 | * |
694 | * Return: The pivot at @piv within the limit of the @pivots array, @mas->max |
695 | * otherwise. |
696 | */ |
697 | static __always_inline unsigned long |
698 | mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots, |
699 | unsigned char piv, enum maple_type type) |
700 | { |
701 | if (piv >= mt_pivots[type]) |
702 | return mas->max; |
703 | |
704 | return pivots[piv]; |
705 | } |
706 | |
707 | /* |
708 | * mas_safe_min() - Return the minimum for a given offset. |
709 | * @mas: The maple state |
710 | * @pivots: The pointer to the maple node pivots |
711 | * @offset: The offset into the pivot array |
712 | * |
713 | * Return: The minimum range value that is contained in @offset. |
714 | */ |
715 | static inline unsigned long |
716 | mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset) |
717 | { |
718 | if (likely(offset)) |
719 | return pivots[offset - 1] + 1; |
720 | |
721 | return mas->min; |
722 | } |
723 | |
724 | /* |
725 | * mte_set_pivot() - Set a pivot to a value in an encoded maple node. |
726 | * @mn: The encoded maple node |
727 | * @piv: The pivot offset |
728 | * @val: The value of the pivot |
729 | */ |
730 | static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, |
731 | unsigned long val) |
732 | { |
733 | struct maple_node *node = mte_to_node(entry: mn); |
734 | enum maple_type type = mte_node_type(entry: mn); |
735 | |
736 | BUG_ON(piv >= mt_pivots[type]); |
737 | switch (type) { |
738 | case maple_range_64: |
739 | case maple_leaf_64: |
740 | node->mr64.pivot[piv] = val; |
741 | break; |
742 | case maple_arange_64: |
743 | node->ma64.pivot[piv] = val; |
744 | break; |
745 | case maple_dense: |
746 | break; |
747 | } |
748 | |
749 | } |
750 | |
751 | /* |
752 | * ma_slots() - Get a pointer to the maple node slots. |
753 | * @mn: The maple node |
754 | * @mt: The maple node type |
755 | * |
756 | * Return: A pointer to the maple node slots |
757 | */ |
758 | static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) |
759 | { |
760 | switch (mt) { |
761 | case maple_arange_64: |
762 | return mn->ma64.slot; |
763 | case maple_range_64: |
764 | case maple_leaf_64: |
765 | return mn->mr64.slot; |
766 | case maple_dense: |
767 | return mn->slot; |
768 | } |
769 | |
770 | return NULL; |
771 | } |
772 | |
773 | static inline bool mt_write_locked(const struct maple_tree *mt) |
774 | { |
775 | return mt_external_lock(mt) ? mt_write_lock_is_held(mt) : |
776 | lockdep_is_held(&mt->ma_lock); |
777 | } |
778 | |
779 | static __always_inline bool mt_locked(const struct maple_tree *mt) |
780 | { |
781 | return mt_external_lock(mt) ? mt_lock_is_held(mt) : |
782 | lockdep_is_held(&mt->ma_lock); |
783 | } |
784 | |
785 | static __always_inline void *mt_slot(const struct maple_tree *mt, |
786 | void __rcu **slots, unsigned char offset) |
787 | { |
788 | return rcu_dereference_check(slots[offset], mt_locked(mt)); |
789 | } |
790 | |
791 | static __always_inline void *mt_slot_locked(struct maple_tree *mt, |
792 | void __rcu **slots, unsigned char offset) |
793 | { |
794 | return rcu_dereference_protected(slots[offset], mt_write_locked(mt)); |
795 | } |
796 | /* |
797 | * mas_slot_locked() - Get the slot value when holding the maple tree lock. |
798 | * @mas: The maple state |
799 | * @slots: The pointer to the slots |
800 | * @offset: The offset into the slots array to fetch |
801 | * |
802 | * Return: The entry stored in @slots at the @offset. |
803 | */ |
804 | static __always_inline void *mas_slot_locked(struct ma_state *mas, |
805 | void __rcu **slots, unsigned char offset) |
806 | { |
807 | return mt_slot_locked(mt: mas->tree, slots, offset); |
808 | } |
809 | |
810 | /* |
811 | * mas_slot() - Get the slot value when not holding the maple tree lock. |
812 | * @mas: The maple state |
813 | * @slots: The pointer to the slots |
814 | * @offset: The offset into the slots array to fetch |
815 | * |
816 | * Return: The entry stored in @slots at the @offset |
817 | */ |
818 | static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots, |
819 | unsigned char offset) |
820 | { |
821 | return mt_slot(mt: mas->tree, slots, offset); |
822 | } |
823 | |
824 | /* |
825 | * mas_root() - Get the maple tree root. |
826 | * @mas: The maple state. |
827 | * |
828 | * Return: The pointer to the root of the tree |
829 | */ |
830 | static __always_inline void *mas_root(struct ma_state *mas) |
831 | { |
832 | return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree)); |
833 | } |
834 | |
835 | static inline void *mt_root_locked(struct maple_tree *mt) |
836 | { |
837 | return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt)); |
838 | } |
839 | |
840 | /* |
841 | * mas_root_locked() - Get the maple tree root when holding the maple tree lock. |
842 | * @mas: The maple state. |
843 | * |
844 | * Return: The pointer to the root of the tree |
845 | */ |
846 | static inline void *mas_root_locked(struct ma_state *mas) |
847 | { |
848 | return mt_root_locked(mt: mas->tree); |
849 | } |
850 | |
851 | static inline struct maple_metadata *ma_meta(struct maple_node *mn, |
852 | enum maple_type mt) |
853 | { |
854 | switch (mt) { |
855 | case maple_arange_64: |
856 | return &mn->ma64.meta; |
857 | default: |
858 | return &mn->mr64.meta; |
859 | } |
860 | } |
861 | |
862 | /* |
863 | * ma_set_meta() - Set the metadata information of a node. |
864 | * @mn: The maple node |
865 | * @mt: The maple node type |
866 | * @offset: The offset of the highest sub-gap in this node. |
867 | * @end: The end of the data in this node. |
868 | */ |
869 | static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt, |
870 | unsigned char offset, unsigned char end) |
871 | { |
872 | struct maple_metadata *meta = ma_meta(mn, mt); |
873 | |
874 | meta->gap = offset; |
875 | meta->end = end; |
876 | } |
877 | |
878 | /* |
879 | * mt_clear_meta() - clear the metadata information of a node, if it exists |
880 | * @mt: The maple tree |
881 | * @mn: The maple node |
882 | * @type: The maple node type |
883 | * @offset: The offset of the highest sub-gap in this node. |
884 | * @end: The end of the data in this node. |
885 | */ |
886 | static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn, |
887 | enum maple_type type) |
888 | { |
889 | struct maple_metadata *meta; |
890 | unsigned long *pivots; |
891 | void __rcu **slots; |
892 | void *next; |
893 | |
894 | switch (type) { |
895 | case maple_range_64: |
896 | pivots = mn->mr64.pivot; |
897 | if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) { |
898 | slots = mn->mr64.slot; |
899 | next = mt_slot_locked(mt, slots, |
900 | MAPLE_RANGE64_SLOTS - 1); |
901 | if (unlikely((mte_to_node(next) && |
902 | mte_node_type(next)))) |
903 | return; /* no metadata, could be node */ |
904 | } |
905 | fallthrough; |
906 | case maple_arange_64: |
907 | meta = ma_meta(mn, mt: type); |
908 | break; |
909 | default: |
910 | return; |
911 | } |
912 | |
913 | meta->gap = 0; |
914 | meta->end = 0; |
915 | } |
916 | |
917 | /* |
918 | * ma_meta_end() - Get the data end of a node from the metadata |
919 | * @mn: The maple node |
920 | * @mt: The maple node type |
921 | */ |
922 | static inline unsigned char ma_meta_end(struct maple_node *mn, |
923 | enum maple_type mt) |
924 | { |
925 | struct maple_metadata *meta = ma_meta(mn, mt); |
926 | |
927 | return meta->end; |
928 | } |
929 | |
930 | /* |
931 | * ma_meta_gap() - Get the largest gap location of a node from the metadata |
932 | * @mn: The maple node |
933 | */ |
934 | static inline unsigned char ma_meta_gap(struct maple_node *mn) |
935 | { |
936 | return mn->ma64.meta.gap; |
937 | } |
938 | |
939 | /* |
940 | * ma_set_meta_gap() - Set the largest gap location in a nodes metadata |
941 | * @mn: The maple node |
942 | * @mn: The maple node type |
943 | * @offset: The location of the largest gap. |
944 | */ |
945 | static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, |
946 | unsigned char offset) |
947 | { |
948 | |
949 | struct maple_metadata *meta = ma_meta(mn, mt); |
950 | |
951 | meta->gap = offset; |
952 | } |
953 | |
954 | /* |
955 | * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes. |
956 | * @mat - the ma_topiary, a linked list of dead nodes. |
957 | * @dead_enode - the node to be marked as dead and added to the tail of the list |
958 | * |
959 | * Add the @dead_enode to the linked list in @mat. |
960 | */ |
961 | static inline void mat_add(struct ma_topiary *mat, |
962 | struct maple_enode *dead_enode) |
963 | { |
964 | mte_set_node_dead(mn: dead_enode); |
965 | mte_to_mat(entry: dead_enode)->next = NULL; |
966 | if (!mat->tail) { |
967 | mat->tail = mat->head = dead_enode; |
968 | return; |
969 | } |
970 | |
971 | mte_to_mat(entry: mat->tail)->next = dead_enode; |
972 | mat->tail = dead_enode; |
973 | } |
974 | |
975 | static void mt_free_walk(struct rcu_head *head); |
976 | static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, |
977 | bool free); |
978 | /* |
979 | * mas_mat_destroy() - Free all nodes and subtrees in a dead list. |
980 | * @mas - the maple state |
981 | * @mat - the ma_topiary linked list of dead nodes to free. |
982 | * |
983 | * Destroy walk a dead list. |
984 | */ |
985 | static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat) |
986 | { |
987 | struct maple_enode *next; |
988 | struct maple_node *node; |
989 | bool in_rcu = mt_in_rcu(mt: mas->tree); |
990 | |
991 | while (mat->head) { |
992 | next = mte_to_mat(entry: mat->head)->next; |
993 | node = mte_to_node(entry: mat->head); |
994 | mt_destroy_walk(enode: mat->head, mt: mas->tree, free: !in_rcu); |
995 | if (in_rcu) |
996 | call_rcu(head: &node->rcu, func: mt_free_walk); |
997 | mat->head = next; |
998 | } |
999 | } |
1000 | /* |
1001 | * mas_descend() - Descend into the slot stored in the ma_state. |
1002 | * @mas - the maple state. |
1003 | * |
1004 | * Note: Not RCU safe, only use in write side or debug code. |
1005 | */ |
1006 | static inline void mas_descend(struct ma_state *mas) |
1007 | { |
1008 | enum maple_type type; |
1009 | unsigned long *pivots; |
1010 | struct maple_node *node; |
1011 | void __rcu **slots; |
1012 | |
1013 | node = mas_mn(mas); |
1014 | type = mte_node_type(entry: mas->node); |
1015 | pivots = ma_pivots(node, type); |
1016 | slots = ma_slots(mn: node, mt: type); |
1017 | |
1018 | if (mas->offset) |
1019 | mas->min = pivots[mas->offset - 1] + 1; |
1020 | mas->max = mas_safe_pivot(mas, pivots, piv: mas->offset, type); |
1021 | mas->node = mas_slot(mas, slots, offset: mas->offset); |
1022 | } |
1023 | |
1024 | /* |
1025 | * mte_set_gap() - Set a maple node gap. |
1026 | * @mn: The encoded maple node |
1027 | * @gap: The offset of the gap to set |
1028 | * @val: The gap value |
1029 | */ |
1030 | static inline void mte_set_gap(const struct maple_enode *mn, |
1031 | unsigned char gap, unsigned long val) |
1032 | { |
1033 | switch (mte_node_type(entry: mn)) { |
1034 | default: |
1035 | break; |
1036 | case maple_arange_64: |
1037 | mte_to_node(entry: mn)->ma64.gap[gap] = val; |
1038 | break; |
1039 | } |
1040 | } |
1041 | |
1042 | /* |
1043 | * mas_ascend() - Walk up a level of the tree. |
1044 | * @mas: The maple state |
1045 | * |
1046 | * Sets the @mas->max and @mas->min to the correct values when walking up. This |
1047 | * may cause several levels of walking up to find the correct min and max. |
1048 | * May find a dead node which will cause a premature return. |
1049 | * Return: 1 on dead node, 0 otherwise |
1050 | */ |
1051 | static int mas_ascend(struct ma_state *mas) |
1052 | { |
1053 | struct maple_enode *p_enode; /* parent enode. */ |
1054 | struct maple_enode *a_enode; /* ancestor enode. */ |
1055 | struct maple_node *a_node; /* ancestor node. */ |
1056 | struct maple_node *p_node; /* parent node. */ |
1057 | unsigned char a_slot; |
1058 | enum maple_type a_type; |
1059 | unsigned long min, max; |
1060 | unsigned long *pivots; |
1061 | bool set_max = false, set_min = false; |
1062 | |
1063 | a_node = mas_mn(mas); |
1064 | if (ma_is_root(node: a_node)) { |
1065 | mas->offset = 0; |
1066 | return 0; |
1067 | } |
1068 | |
1069 | p_node = mte_parent(enode: mas->node); |
1070 | if (unlikely(a_node == p_node)) |
1071 | return 1; |
1072 | |
1073 | a_type = mas_parent_type(mas, enode: mas->node); |
1074 | mas->offset = mte_parent_slot(enode: mas->node); |
1075 | a_enode = mt_mk_node(node: p_node, type: a_type); |
1076 | |
1077 | /* Check to make sure all parent information is still accurate */ |
1078 | if (p_node != mte_parent(enode: mas->node)) |
1079 | return 1; |
1080 | |
1081 | mas->node = a_enode; |
1082 | |
1083 | if (mte_is_root(node: a_enode)) { |
1084 | mas->max = ULONG_MAX; |
1085 | mas->min = 0; |
1086 | return 0; |
1087 | } |
1088 | |
1089 | min = 0; |
1090 | max = ULONG_MAX; |
1091 | if (!mas->offset) { |
1092 | min = mas->min; |
1093 | set_min = true; |
1094 | } |
1095 | |
1096 | if (mas->max == ULONG_MAX) |
1097 | set_max = true; |
1098 | |
1099 | do { |
1100 | p_enode = a_enode; |
1101 | a_type = mas_parent_type(mas, enode: p_enode); |
1102 | a_node = mte_parent(enode: p_enode); |
1103 | a_slot = mte_parent_slot(enode: p_enode); |
1104 | a_enode = mt_mk_node(node: a_node, type: a_type); |
1105 | pivots = ma_pivots(node: a_node, type: a_type); |
1106 | |
1107 | if (unlikely(ma_dead_node(a_node))) |
1108 | return 1; |
1109 | |
1110 | if (!set_min && a_slot) { |
1111 | set_min = true; |
1112 | min = pivots[a_slot - 1] + 1; |
1113 | } |
1114 | |
1115 | if (!set_max && a_slot < mt_pivots[a_type]) { |
1116 | set_max = true; |
1117 | max = pivots[a_slot]; |
1118 | } |
1119 | |
1120 | if (unlikely(ma_dead_node(a_node))) |
1121 | return 1; |
1122 | |
1123 | if (unlikely(ma_is_root(a_node))) |
1124 | break; |
1125 | |
1126 | } while (!set_min || !set_max); |
1127 | |
1128 | mas->max = max; |
1129 | mas->min = min; |
1130 | return 0; |
1131 | } |
1132 | |
1133 | /* |
1134 | * mas_pop_node() - Get a previously allocated maple node from the maple state. |
1135 | * @mas: The maple state |
1136 | * |
1137 | * Return: A pointer to a maple node. |
1138 | */ |
1139 | static inline struct maple_node *mas_pop_node(struct ma_state *mas) |
1140 | { |
1141 | struct maple_alloc *ret, *node = mas->alloc; |
1142 | unsigned long total = mas_allocated(mas); |
1143 | unsigned int req = mas_alloc_req(mas); |
1144 | |
1145 | /* nothing or a request pending. */ |
1146 | if (WARN_ON(!total)) |
1147 | return NULL; |
1148 | |
1149 | if (total == 1) { |
1150 | /* single allocation in this ma_state */ |
1151 | mas->alloc = NULL; |
1152 | ret = node; |
1153 | goto single_node; |
1154 | } |
1155 | |
1156 | if (node->node_count == 1) { |
1157 | /* Single allocation in this node. */ |
1158 | mas->alloc = node->slot[0]; |
1159 | mas->alloc->total = node->total - 1; |
1160 | ret = node; |
1161 | goto new_head; |
1162 | } |
1163 | node->total--; |
1164 | ret = node->slot[--node->node_count]; |
1165 | node->slot[node->node_count] = NULL; |
1166 | |
1167 | single_node: |
1168 | new_head: |
1169 | if (req) { |
1170 | req++; |
1171 | mas_set_alloc_req(mas, count: req); |
1172 | } |
1173 | |
1174 | memset(ret, 0, sizeof(*ret)); |
1175 | return (struct maple_node *)ret; |
1176 | } |
1177 | |
1178 | /* |
1179 | * mas_push_node() - Push a node back on the maple state allocation. |
1180 | * @mas: The maple state |
1181 | * @used: The used maple node |
1182 | * |
1183 | * Stores the maple node back into @mas->alloc for reuse. Updates allocated and |
1184 | * requested node count as necessary. |
1185 | */ |
1186 | static inline void mas_push_node(struct ma_state *mas, struct maple_node *used) |
1187 | { |
1188 | struct maple_alloc *reuse = (struct maple_alloc *)used; |
1189 | struct maple_alloc *head = mas->alloc; |
1190 | unsigned long count; |
1191 | unsigned int requested = mas_alloc_req(mas); |
1192 | |
1193 | count = mas_allocated(mas); |
1194 | |
1195 | reuse->request_count = 0; |
1196 | reuse->node_count = 0; |
1197 | if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) { |
1198 | head->slot[head->node_count++] = reuse; |
1199 | head->total++; |
1200 | goto done; |
1201 | } |
1202 | |
1203 | reuse->total = 1; |
1204 | if ((head) && !((unsigned long)head & 0x1)) { |
1205 | reuse->slot[0] = head; |
1206 | reuse->node_count = 1; |
1207 | reuse->total += head->total; |
1208 | } |
1209 | |
1210 | mas->alloc = reuse; |
1211 | done: |
1212 | if (requested > 1) |
1213 | mas_set_alloc_req(mas, count: requested - 1); |
1214 | } |
1215 | |
1216 | /* |
1217 | * mas_alloc_nodes() - Allocate nodes into a maple state |
1218 | * @mas: The maple state |
1219 | * @gfp: The GFP Flags |
1220 | */ |
1221 | static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) |
1222 | { |
1223 | struct maple_alloc *node; |
1224 | unsigned long allocated = mas_allocated(mas); |
1225 | unsigned int requested = mas_alloc_req(mas); |
1226 | unsigned int count; |
1227 | void **slots = NULL; |
1228 | unsigned int max_req = 0; |
1229 | |
1230 | if (!requested) |
1231 | return; |
1232 | |
1233 | mas_set_alloc_req(mas, count: 0); |
1234 | if (mas->mas_flags & MA_STATE_PREALLOC) { |
1235 | if (allocated) |
1236 | return; |
1237 | BUG_ON(!allocated); |
1238 | WARN_ON(!allocated); |
1239 | } |
1240 | |
1241 | if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) { |
1242 | node = (struct maple_alloc *)mt_alloc_one(gfp); |
1243 | if (!node) |
1244 | goto nomem_one; |
1245 | |
1246 | if (allocated) { |
1247 | node->slot[0] = mas->alloc; |
1248 | node->node_count = 1; |
1249 | } else { |
1250 | node->node_count = 0; |
1251 | } |
1252 | |
1253 | mas->alloc = node; |
1254 | node->total = ++allocated; |
1255 | requested--; |
1256 | } |
1257 | |
1258 | node = mas->alloc; |
1259 | node->request_count = 0; |
1260 | while (requested) { |
1261 | max_req = MAPLE_ALLOC_SLOTS - node->node_count; |
1262 | slots = (void **)&node->slot[node->node_count]; |
1263 | max_req = min(requested, max_req); |
1264 | count = mt_alloc_bulk(gfp, size: max_req, nodes: slots); |
1265 | if (!count) |
1266 | goto nomem_bulk; |
1267 | |
1268 | if (node->node_count == 0) { |
1269 | node->slot[0]->node_count = 0; |
1270 | node->slot[0]->request_count = 0; |
1271 | } |
1272 | |
1273 | node->node_count += count; |
1274 | allocated += count; |
1275 | node = node->slot[0]; |
1276 | requested -= count; |
1277 | } |
1278 | mas->alloc->total = allocated; |
1279 | return; |
1280 | |
1281 | nomem_bulk: |
1282 | /* Clean up potential freed allocations on bulk failure */ |
1283 | memset(slots, 0, max_req * sizeof(unsigned long)); |
1284 | nomem_one: |
1285 | mas_set_alloc_req(mas, count: requested); |
1286 | if (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) |
1287 | mas->alloc->total = allocated; |
1288 | mas_set_err(mas, err: -ENOMEM); |
1289 | } |
1290 | |
1291 | /* |
1292 | * mas_free() - Free an encoded maple node |
1293 | * @mas: The maple state |
1294 | * @used: The encoded maple node to free. |
1295 | * |
1296 | * Uses rcu free if necessary, pushes @used back on the maple state allocations |
1297 | * otherwise. |
1298 | */ |
1299 | static inline void mas_free(struct ma_state *mas, struct maple_enode *used) |
1300 | { |
1301 | struct maple_node *tmp = mte_to_node(entry: used); |
1302 | |
1303 | if (mt_in_rcu(mt: mas->tree)) |
1304 | ma_free_rcu(node: tmp); |
1305 | else |
1306 | mas_push_node(mas, used: tmp); |
1307 | } |
1308 | |
1309 | /* |
1310 | * mas_node_count_gfp() - Check if enough nodes are allocated and request more |
1311 | * if there is not enough nodes. |
1312 | * @mas: The maple state |
1313 | * @count: The number of nodes needed |
1314 | * @gfp: the gfp flags |
1315 | */ |
1316 | static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp) |
1317 | { |
1318 | unsigned long allocated = mas_allocated(mas); |
1319 | |
1320 | if (allocated < count) { |
1321 | mas_set_alloc_req(mas, count: count - allocated); |
1322 | mas_alloc_nodes(mas, gfp); |
1323 | } |
1324 | } |
1325 | |
1326 | /* |
1327 | * mas_node_count() - Check if enough nodes are allocated and request more if |
1328 | * there is not enough nodes. |
1329 | * @mas: The maple state |
1330 | * @count: The number of nodes needed |
1331 | * |
1332 | * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags. |
1333 | */ |
1334 | static void mas_node_count(struct ma_state *mas, int count) |
1335 | { |
1336 | return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN); |
1337 | } |
1338 | |
1339 | /* |
1340 | * mas_start() - Sets up maple state for operations. |
1341 | * @mas: The maple state. |
1342 | * |
1343 | * If mas->status == mas_start, then set the min, max and depth to |
1344 | * defaults. |
1345 | * |
1346 | * Return: |
1347 | * - If mas->node is an error or not mas_start, return NULL. |
1348 | * - If it's an empty tree: NULL & mas->status == ma_none |
1349 | * - If it's a single entry: The entry & mas->status == mas_root |
1350 | * - If it's a tree: NULL & mas->status == safe root node. |
1351 | */ |
1352 | static inline struct maple_enode *mas_start(struct ma_state *mas) |
1353 | { |
1354 | if (likely(mas_is_start(mas))) { |
1355 | struct maple_enode *root; |
1356 | |
1357 | mas->min = 0; |
1358 | mas->max = ULONG_MAX; |
1359 | |
1360 | retry: |
1361 | mas->depth = 0; |
1362 | root = mas_root(mas); |
1363 | /* Tree with nodes */ |
1364 | if (likely(xa_is_node(root))) { |
1365 | mas->depth = 1; |
1366 | mas->status = ma_active; |
1367 | mas->node = mte_safe_root(node: root); |
1368 | mas->offset = 0; |
1369 | if (mte_dead_node(enode: mas->node)) |
1370 | goto retry; |
1371 | |
1372 | return NULL; |
1373 | } |
1374 | |
1375 | /* empty tree */ |
1376 | if (unlikely(!root)) { |
1377 | mas->node = NULL; |
1378 | mas->status = ma_none; |
1379 | mas->offset = MAPLE_NODE_SLOTS; |
1380 | return NULL; |
1381 | } |
1382 | |
1383 | /* Single entry tree */ |
1384 | mas->status = ma_root; |
1385 | mas->offset = MAPLE_NODE_SLOTS; |
1386 | |
1387 | /* Single entry tree. */ |
1388 | if (mas->index > 0) |
1389 | return NULL; |
1390 | |
1391 | return root; |
1392 | } |
1393 | |
1394 | return NULL; |
1395 | } |
1396 | |
1397 | /* |
1398 | * ma_data_end() - Find the end of the data in a node. |
1399 | * @node: The maple node |
1400 | * @type: The maple node type |
1401 | * @pivots: The array of pivots in the node |
1402 | * @max: The maximum value in the node |
1403 | * |
1404 | * Uses metadata to find the end of the data when possible. |
1405 | * Return: The zero indexed last slot with data (may be null). |
1406 | */ |
1407 | static __always_inline unsigned char ma_data_end(struct maple_node *node, |
1408 | enum maple_type type, unsigned long *pivots, unsigned long max) |
1409 | { |
1410 | unsigned char offset; |
1411 | |
1412 | if (!pivots) |
1413 | return 0; |
1414 | |
1415 | if (type == maple_arange_64) |
1416 | return ma_meta_end(mn: node, mt: type); |
1417 | |
1418 | offset = mt_pivots[type] - 1; |
1419 | if (likely(!pivots[offset])) |
1420 | return ma_meta_end(mn: node, mt: type); |
1421 | |
1422 | if (likely(pivots[offset] == max)) |
1423 | return offset; |
1424 | |
1425 | return mt_pivots[type]; |
1426 | } |
1427 | |
1428 | /* |
1429 | * mas_data_end() - Find the end of the data (slot). |
1430 | * @mas: the maple state |
1431 | * |
1432 | * This method is optimized to check the metadata of a node if the node type |
1433 | * supports data end metadata. |
1434 | * |
1435 | * Return: The zero indexed last slot with data (may be null). |
1436 | */ |
1437 | static inline unsigned char mas_data_end(struct ma_state *mas) |
1438 | { |
1439 | enum maple_type type; |
1440 | struct maple_node *node; |
1441 | unsigned char offset; |
1442 | unsigned long *pivots; |
1443 | |
1444 | type = mte_node_type(entry: mas->node); |
1445 | node = mas_mn(mas); |
1446 | if (type == maple_arange_64) |
1447 | return ma_meta_end(mn: node, mt: type); |
1448 | |
1449 | pivots = ma_pivots(node, type); |
1450 | if (unlikely(ma_dead_node(node))) |
1451 | return 0; |
1452 | |
1453 | offset = mt_pivots[type] - 1; |
1454 | if (likely(!pivots[offset])) |
1455 | return ma_meta_end(mn: node, mt: type); |
1456 | |
1457 | if (likely(pivots[offset] == mas->max)) |
1458 | return offset; |
1459 | |
1460 | return mt_pivots[type]; |
1461 | } |
1462 | |
1463 | /* |
1464 | * mas_leaf_max_gap() - Returns the largest gap in a leaf node |
1465 | * @mas - the maple state |
1466 | * |
1467 | * Return: The maximum gap in the leaf. |
1468 | */ |
1469 | static unsigned long mas_leaf_max_gap(struct ma_state *mas) |
1470 | { |
1471 | enum maple_type mt; |
1472 | unsigned long pstart, gap, max_gap; |
1473 | struct maple_node *mn; |
1474 | unsigned long *pivots; |
1475 | void __rcu **slots; |
1476 | unsigned char i; |
1477 | unsigned char max_piv; |
1478 | |
1479 | mt = mte_node_type(entry: mas->node); |
1480 | mn = mas_mn(mas); |
1481 | slots = ma_slots(mn, mt); |
1482 | max_gap = 0; |
1483 | if (unlikely(ma_is_dense(mt))) { |
1484 | gap = 0; |
1485 | for (i = 0; i < mt_slots[mt]; i++) { |
1486 | if (slots[i]) { |
1487 | if (gap > max_gap) |
1488 | max_gap = gap; |
1489 | gap = 0; |
1490 | } else { |
1491 | gap++; |
1492 | } |
1493 | } |
1494 | if (gap > max_gap) |
1495 | max_gap = gap; |
1496 | return max_gap; |
1497 | } |
1498 | |
1499 | /* |
1500 | * Check the first implied pivot optimizes the loop below and slot 1 may |
1501 | * be skipped if there is a gap in slot 0. |
1502 | */ |
1503 | pivots = ma_pivots(node: mn, type: mt); |
1504 | if (likely(!slots[0])) { |
1505 | max_gap = pivots[0] - mas->min + 1; |
1506 | i = 2; |
1507 | } else { |
1508 | i = 1; |
1509 | } |
1510 | |
1511 | /* reduce max_piv as the special case is checked before the loop */ |
1512 | max_piv = ma_data_end(node: mn, type: mt, pivots, max: mas->max) - 1; |
1513 | /* |
1514 | * Check end implied pivot which can only be a gap on the right most |
1515 | * node. |
1516 | */ |
1517 | if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) { |
1518 | gap = ULONG_MAX - pivots[max_piv]; |
1519 | if (gap > max_gap) |
1520 | max_gap = gap; |
1521 | |
1522 | if (max_gap > pivots[max_piv] - mas->min) |
1523 | return max_gap; |
1524 | } |
1525 | |
1526 | for (; i <= max_piv; i++) { |
1527 | /* data == no gap. */ |
1528 | if (likely(slots[i])) |
1529 | continue; |
1530 | |
1531 | pstart = pivots[i - 1]; |
1532 | gap = pivots[i] - pstart; |
1533 | if (gap > max_gap) |
1534 | max_gap = gap; |
1535 | |
1536 | /* There cannot be two gaps in a row. */ |
1537 | i++; |
1538 | } |
1539 | return max_gap; |
1540 | } |
1541 | |
1542 | /* |
1543 | * ma_max_gap() - Get the maximum gap in a maple node (non-leaf) |
1544 | * @node: The maple node |
1545 | * @gaps: The pointer to the gaps |
1546 | * @mt: The maple node type |
1547 | * @*off: Pointer to store the offset location of the gap. |
1548 | * |
1549 | * Uses the metadata data end to scan backwards across set gaps. |
1550 | * |
1551 | * Return: The maximum gap value |
1552 | */ |
1553 | static inline unsigned long |
1554 | ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt, |
1555 | unsigned char *off) |
1556 | { |
1557 | unsigned char offset, i; |
1558 | unsigned long max_gap = 0; |
1559 | |
1560 | i = offset = ma_meta_end(mn: node, mt); |
1561 | do { |
1562 | if (gaps[i] > max_gap) { |
1563 | max_gap = gaps[i]; |
1564 | offset = i; |
1565 | } |
1566 | } while (i--); |
1567 | |
1568 | *off = offset; |
1569 | return max_gap; |
1570 | } |
1571 | |
1572 | /* |
1573 | * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. |
1574 | * @mas: The maple state. |
1575 | * |
1576 | * Return: The gap value. |
1577 | */ |
1578 | static inline unsigned long mas_max_gap(struct ma_state *mas) |
1579 | { |
1580 | unsigned long *gaps; |
1581 | unsigned char offset; |
1582 | enum maple_type mt; |
1583 | struct maple_node *node; |
1584 | |
1585 | mt = mte_node_type(entry: mas->node); |
1586 | if (ma_is_leaf(type: mt)) |
1587 | return mas_leaf_max_gap(mas); |
1588 | |
1589 | node = mas_mn(mas); |
1590 | MAS_BUG_ON(mas, mt != maple_arange_64); |
1591 | offset = ma_meta_gap(mn: node); |
1592 | gaps = ma_gaps(node, type: mt); |
1593 | return gaps[offset]; |
1594 | } |
1595 | |
1596 | /* |
1597 | * mas_parent_gap() - Set the parent gap and any gaps above, as needed |
1598 | * @mas: The maple state |
1599 | * @offset: The gap offset in the parent to set |
1600 | * @new: The new gap value. |
1601 | * |
1602 | * Set the parent gap then continue to set the gap upwards, using the metadata |
1603 | * of the parent to see if it is necessary to check the node above. |
1604 | */ |
1605 | static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, |
1606 | unsigned long new) |
1607 | { |
1608 | unsigned long meta_gap = 0; |
1609 | struct maple_node *pnode; |
1610 | struct maple_enode *penode; |
1611 | unsigned long *pgaps; |
1612 | unsigned char meta_offset; |
1613 | enum maple_type pmt; |
1614 | |
1615 | pnode = mte_parent(enode: mas->node); |
1616 | pmt = mas_parent_type(mas, enode: mas->node); |
1617 | penode = mt_mk_node(node: pnode, type: pmt); |
1618 | pgaps = ma_gaps(node: pnode, type: pmt); |
1619 | |
1620 | ascend: |
1621 | MAS_BUG_ON(mas, pmt != maple_arange_64); |
1622 | meta_offset = ma_meta_gap(mn: pnode); |
1623 | meta_gap = pgaps[meta_offset]; |
1624 | |
1625 | pgaps[offset] = new; |
1626 | |
1627 | if (meta_gap == new) |
1628 | return; |
1629 | |
1630 | if (offset != meta_offset) { |
1631 | if (meta_gap > new) |
1632 | return; |
1633 | |
1634 | ma_set_meta_gap(mn: pnode, mt: pmt, offset); |
1635 | } else if (new < meta_gap) { |
1636 | new = ma_max_gap(node: pnode, gaps: pgaps, mt: pmt, off: &meta_offset); |
1637 | ma_set_meta_gap(mn: pnode, mt: pmt, offset: meta_offset); |
1638 | } |
1639 | |
1640 | if (ma_is_root(node: pnode)) |
1641 | return; |
1642 | |
1643 | /* Go to the parent node. */ |
1644 | pnode = mte_parent(enode: penode); |
1645 | pmt = mas_parent_type(mas, enode: penode); |
1646 | pgaps = ma_gaps(node: pnode, type: pmt); |
1647 | offset = mte_parent_slot(enode: penode); |
1648 | penode = mt_mk_node(node: pnode, type: pmt); |
1649 | goto ascend; |
1650 | } |
1651 | |
1652 | /* |
1653 | * mas_update_gap() - Update a nodes gaps and propagate up if necessary. |
1654 | * @mas - the maple state. |
1655 | */ |
1656 | static inline void mas_update_gap(struct ma_state *mas) |
1657 | { |
1658 | unsigned char pslot; |
1659 | unsigned long p_gap; |
1660 | unsigned long max_gap; |
1661 | |
1662 | if (!mt_is_alloc(mt: mas->tree)) |
1663 | return; |
1664 | |
1665 | if (mte_is_root(node: mas->node)) |
1666 | return; |
1667 | |
1668 | max_gap = mas_max_gap(mas); |
1669 | |
1670 | pslot = mte_parent_slot(enode: mas->node); |
1671 | p_gap = ma_gaps(node: mte_parent(enode: mas->node), |
1672 | type: mas_parent_type(mas, enode: mas->node))[pslot]; |
1673 | |
1674 | if (p_gap != max_gap) |
1675 | mas_parent_gap(mas, offset: pslot, new: max_gap); |
1676 | } |
1677 | |
1678 | /* |
1679 | * mas_adopt_children() - Set the parent pointer of all nodes in @parent to |
1680 | * @parent with the slot encoded. |
1681 | * @mas - the maple state (for the tree) |
1682 | * @parent - the maple encoded node containing the children. |
1683 | */ |
1684 | static inline void mas_adopt_children(struct ma_state *mas, |
1685 | struct maple_enode *parent) |
1686 | { |
1687 | enum maple_type type = mte_node_type(entry: parent); |
1688 | struct maple_node *node = mte_to_node(entry: parent); |
1689 | void __rcu **slots = ma_slots(mn: node, mt: type); |
1690 | unsigned long *pivots = ma_pivots(node, type); |
1691 | struct maple_enode *child; |
1692 | unsigned char offset; |
1693 | |
1694 | offset = ma_data_end(node, type, pivots, max: mas->max); |
1695 | do { |
1696 | child = mas_slot_locked(mas, slots, offset); |
1697 | mas_set_parent(mas, enode: child, parent, slot: offset); |
1698 | } while (offset--); |
1699 | } |
1700 | |
1701 | /* |
1702 | * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old |
1703 | * node as dead. |
1704 | * @mas - the maple state with the new node |
1705 | * @old_enode - The old maple encoded node to replace. |
1706 | */ |
1707 | static inline void mas_put_in_tree(struct ma_state *mas, |
1708 | struct maple_enode *old_enode) |
1709 | __must_hold(mas->tree->ma_lock) |
1710 | { |
1711 | unsigned char offset; |
1712 | void __rcu **slots; |
1713 | |
1714 | if (mte_is_root(node: mas->node)) { |
1715 | mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas)); |
1716 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); |
1717 | mas_set_height(mas); |
1718 | } else { |
1719 | |
1720 | offset = mte_parent_slot(enode: mas->node); |
1721 | slots = ma_slots(mn: mte_parent(enode: mas->node), |
1722 | mt: mas_parent_type(mas, enode: mas->node)); |
1723 | rcu_assign_pointer(slots[offset], mas->node); |
1724 | } |
1725 | |
1726 | mte_set_node_dead(mn: old_enode); |
1727 | } |
1728 | |
1729 | /* |
1730 | * mas_replace_node() - Replace a node by putting it in the tree, marking it |
1731 | * dead, and freeing it. |
1732 | * the parent encoding to locate the maple node in the tree. |
1733 | * @mas - the ma_state with @mas->node pointing to the new node. |
1734 | * @old_enode - The old maple encoded node. |
1735 | */ |
1736 | static inline void mas_replace_node(struct ma_state *mas, |
1737 | struct maple_enode *old_enode) |
1738 | __must_hold(mas->tree->ma_lock) |
1739 | { |
1740 | mas_put_in_tree(mas, old_enode); |
1741 | mas_free(mas, used: old_enode); |
1742 | } |
1743 | |
1744 | /* |
1745 | * mas_find_child() - Find a child who has the parent @mas->node. |
1746 | * @mas: the maple state with the parent. |
1747 | * @child: the maple state to store the child. |
1748 | */ |
1749 | static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child) |
1750 | __must_hold(mas->tree->ma_lock) |
1751 | { |
1752 | enum maple_type mt; |
1753 | unsigned char offset; |
1754 | unsigned char end; |
1755 | unsigned long *pivots; |
1756 | struct maple_enode *entry; |
1757 | struct maple_node *node; |
1758 | void __rcu **slots; |
1759 | |
1760 | mt = mte_node_type(entry: mas->node); |
1761 | node = mas_mn(mas); |
1762 | slots = ma_slots(mn: node, mt); |
1763 | pivots = ma_pivots(node, type: mt); |
1764 | end = ma_data_end(node, type: mt, pivots, max: mas->max); |
1765 | for (offset = mas->offset; offset <= end; offset++) { |
1766 | entry = mas_slot_locked(mas, slots, offset); |
1767 | if (mte_parent(enode: entry) == node) { |
1768 | *child = *mas; |
1769 | mas->offset = offset + 1; |
1770 | child->offset = offset; |
1771 | mas_descend(mas: child); |
1772 | child->offset = 0; |
1773 | return true; |
1774 | } |
1775 | } |
1776 | return false; |
1777 | } |
1778 | |
1779 | /* |
1780 | * mab_shift_right() - Shift the data in mab right. Note, does not clean out the |
1781 | * old data or set b_node->b_end. |
1782 | * @b_node: the maple_big_node |
1783 | * @shift: the shift count |
1784 | */ |
1785 | static inline void mab_shift_right(struct maple_big_node *b_node, |
1786 | unsigned char shift) |
1787 | { |
1788 | unsigned long size = b_node->b_end * sizeof(unsigned long); |
1789 | |
1790 | memmove(b_node->pivot + shift, b_node->pivot, size); |
1791 | memmove(b_node->slot + shift, b_node->slot, size); |
1792 | if (b_node->type == maple_arange_64) |
1793 | memmove(b_node->gap + shift, b_node->gap, size); |
1794 | } |
1795 | |
1796 | /* |
1797 | * mab_middle_node() - Check if a middle node is needed (unlikely) |
1798 | * @b_node: the maple_big_node that contains the data. |
1799 | * @size: the amount of data in the b_node |
1800 | * @split: the potential split location |
1801 | * @slot_count: the size that can be stored in a single node being considered. |
1802 | * |
1803 | * Return: true if a middle node is required. |
1804 | */ |
1805 | static inline bool mab_middle_node(struct maple_big_node *b_node, int split, |
1806 | unsigned char slot_count) |
1807 | { |
1808 | unsigned char size = b_node->b_end; |
1809 | |
1810 | if (size >= 2 * slot_count) |
1811 | return true; |
1812 | |
1813 | if (!b_node->slot[split] && (size >= 2 * slot_count - 1)) |
1814 | return true; |
1815 | |
1816 | return false; |
1817 | } |
1818 | |
1819 | /* |
1820 | * mab_no_null_split() - ensure the split doesn't fall on a NULL |
1821 | * @b_node: the maple_big_node with the data |
1822 | * @split: the suggested split location |
1823 | * @slot_count: the number of slots in the node being considered. |
1824 | * |
1825 | * Return: the split location. |
1826 | */ |
1827 | static inline int mab_no_null_split(struct maple_big_node *b_node, |
1828 | unsigned char split, unsigned char slot_count) |
1829 | { |
1830 | if (!b_node->slot[split]) { |
1831 | /* |
1832 | * If the split is less than the max slot && the right side will |
1833 | * still be sufficient, then increment the split on NULL. |
1834 | */ |
1835 | if ((split < slot_count - 1) && |
1836 | (b_node->b_end - split) > (mt_min_slots[b_node->type])) |
1837 | split++; |
1838 | else |
1839 | split--; |
1840 | } |
1841 | return split; |
1842 | } |
1843 | |
1844 | /* |
1845 | * mab_calc_split() - Calculate the split location and if there needs to be two |
1846 | * splits. |
1847 | * @bn: The maple_big_node with the data |
1848 | * @mid_split: The second split, if required. 0 otherwise. |
1849 | * |
1850 | * Return: The first split location. The middle split is set in @mid_split. |
1851 | */ |
1852 | static inline int mab_calc_split(struct ma_state *mas, |
1853 | struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) |
1854 | { |
1855 | unsigned char b_end = bn->b_end; |
1856 | int split = b_end / 2; /* Assume equal split. */ |
1857 | unsigned char slot_min, slot_count = mt_slots[bn->type]; |
1858 | |
1859 | /* |
1860 | * To support gap tracking, all NULL entries are kept together and a node cannot |
1861 | * end on a NULL entry, with the exception of the left-most leaf. The |
1862 | * limitation means that the split of a node must be checked for this condition |
1863 | * and be able to put more data in one direction or the other. |
1864 | */ |
1865 | if (unlikely((mas->mas_flags & MA_STATE_BULK))) { |
1866 | *mid_split = 0; |
1867 | split = b_end - mt_min_slots[bn->type]; |
1868 | |
1869 | if (!ma_is_leaf(type: bn->type)) |
1870 | return split; |
1871 | |
1872 | mas->mas_flags |= MA_STATE_REBALANCE; |
1873 | if (!bn->slot[split]) |
1874 | split--; |
1875 | return split; |
1876 | } |
1877 | |
1878 | /* |
1879 | * Although extremely rare, it is possible to enter what is known as the 3-way |
1880 | * split scenario. The 3-way split comes about by means of a store of a range |
1881 | * that overwrites the end and beginning of two full nodes. The result is a set |
1882 | * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can |
1883 | * also be located in different parent nodes which are also full. This can |
1884 | * carry upwards all the way to the root in the worst case. |
1885 | */ |
1886 | if (unlikely(mab_middle_node(bn, split, slot_count))) { |
1887 | split = b_end / 3; |
1888 | *mid_split = split * 2; |
1889 | } else { |
1890 | slot_min = mt_min_slots[bn->type]; |
1891 | |
1892 | *mid_split = 0; |
1893 | /* |
1894 | * Avoid having a range less than the slot count unless it |
1895 | * causes one node to be deficient. |
1896 | * NOTE: mt_min_slots is 1 based, b_end and split are zero. |
1897 | */ |
1898 | while ((split < slot_count - 1) && |
1899 | ((bn->pivot[split] - min) < slot_count - 1) && |
1900 | (b_end - split > slot_min)) |
1901 | split++; |
1902 | } |
1903 | |
1904 | /* Avoid ending a node on a NULL entry */ |
1905 | split = mab_no_null_split(b_node: bn, split, slot_count); |
1906 | |
1907 | if (unlikely(*mid_split)) |
1908 | *mid_split = mab_no_null_split(b_node: bn, split: *mid_split, slot_count); |
1909 | |
1910 | return split; |
1911 | } |
1912 | |
1913 | /* |
1914 | * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node |
1915 | * and set @b_node->b_end to the next free slot. |
1916 | * @mas: The maple state |
1917 | * @mas_start: The starting slot to copy |
1918 | * @mas_end: The end slot to copy (inclusively) |
1919 | * @b_node: The maple_big_node to place the data |
1920 | * @mab_start: The starting location in maple_big_node to store the data. |
1921 | */ |
1922 | static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start, |
1923 | unsigned char mas_end, struct maple_big_node *b_node, |
1924 | unsigned char mab_start) |
1925 | { |
1926 | enum maple_type mt; |
1927 | struct maple_node *node; |
1928 | void __rcu **slots; |
1929 | unsigned long *pivots, *gaps; |
1930 | int i = mas_start, j = mab_start; |
1931 | unsigned char piv_end; |
1932 | |
1933 | node = mas_mn(mas); |
1934 | mt = mte_node_type(entry: mas->node); |
1935 | pivots = ma_pivots(node, type: mt); |
1936 | if (!i) { |
1937 | b_node->pivot[j] = pivots[i++]; |
1938 | if (unlikely(i > mas_end)) |
1939 | goto complete; |
1940 | j++; |
1941 | } |
1942 | |
1943 | piv_end = min(mas_end, mt_pivots[mt]); |
1944 | for (; i < piv_end; i++, j++) { |
1945 | b_node->pivot[j] = pivots[i]; |
1946 | if (unlikely(!b_node->pivot[j])) |
1947 | break; |
1948 | |
1949 | if (unlikely(mas->max == b_node->pivot[j])) |
1950 | goto complete; |
1951 | } |
1952 | |
1953 | if (likely(i <= mas_end)) |
1954 | b_node->pivot[j] = mas_safe_pivot(mas, pivots, piv: i, type: mt); |
1955 | |
1956 | complete: |
1957 | b_node->b_end = ++j; |
1958 | j -= mab_start; |
1959 | slots = ma_slots(mn: node, mt); |
1960 | memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j); |
1961 | if (!ma_is_leaf(type: mt) && mt_is_alloc(mt: mas->tree)) { |
1962 | gaps = ma_gaps(node, type: mt); |
1963 | memcpy(b_node->gap + mab_start, gaps + mas_start, |
1964 | sizeof(unsigned long) * j); |
1965 | } |
1966 | } |
1967 | |
1968 | /* |
1969 | * mas_leaf_set_meta() - Set the metadata of a leaf if possible. |
1970 | * @node: The maple node |
1971 | * @mt: The maple type |
1972 | * @end: The node end |
1973 | */ |
1974 | static inline void mas_leaf_set_meta(struct maple_node *node, |
1975 | enum maple_type mt, unsigned char end) |
1976 | { |
1977 | if (end < mt_slots[mt] - 1) |
1978 | ma_set_meta(mn: node, mt, offset: 0, end); |
1979 | } |
1980 | |
1981 | /* |
1982 | * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node. |
1983 | * @b_node: the maple_big_node that has the data |
1984 | * @mab_start: the start location in @b_node. |
1985 | * @mab_end: The end location in @b_node (inclusively) |
1986 | * @mas: The maple state with the maple encoded node. |
1987 | */ |
1988 | static inline void mab_mas_cp(struct maple_big_node *b_node, |
1989 | unsigned char mab_start, unsigned char mab_end, |
1990 | struct ma_state *mas, bool new_max) |
1991 | { |
1992 | int i, j = 0; |
1993 | enum maple_type mt = mte_node_type(entry: mas->node); |
1994 | struct maple_node *node = mte_to_node(entry: mas->node); |
1995 | void __rcu **slots = ma_slots(mn: node, mt); |
1996 | unsigned long *pivots = ma_pivots(node, type: mt); |
1997 | unsigned long *gaps = NULL; |
1998 | unsigned char end; |
1999 | |
2000 | if (mab_end - mab_start > mt_pivots[mt]) |
2001 | mab_end--; |
2002 | |
2003 | if (!pivots[mt_pivots[mt] - 1]) |
2004 | slots[mt_pivots[mt]] = NULL; |
2005 | |
2006 | i = mab_start; |
2007 | do { |
2008 | pivots[j++] = b_node->pivot[i++]; |
2009 | } while (i <= mab_end && likely(b_node->pivot[i])); |
2010 | |
2011 | memcpy(slots, b_node->slot + mab_start, |
2012 | sizeof(void *) * (i - mab_start)); |
2013 | |
2014 | if (new_max) |
2015 | mas->max = b_node->pivot[i - 1]; |
2016 | |
2017 | end = j - 1; |
2018 | if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { |
2019 | unsigned long max_gap = 0; |
2020 | unsigned char offset = 0; |
2021 | |
2022 | gaps = ma_gaps(node, type: mt); |
2023 | do { |
2024 | gaps[--j] = b_node->gap[--i]; |
2025 | if (gaps[j] > max_gap) { |
2026 | offset = j; |
2027 | max_gap = gaps[j]; |
2028 | } |
2029 | } while (j); |
2030 | |
2031 | ma_set_meta(mn: node, mt, offset, end); |
2032 | } else { |
2033 | mas_leaf_set_meta(node, mt, end); |
2034 | } |
2035 | } |
2036 | |
2037 | /* |
2038 | * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. |
2039 | * @mas: The maple state |
2040 | * @end: The maple node end |
2041 | * @mt: The maple node type |
2042 | */ |
2043 | static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, |
2044 | enum maple_type mt) |
2045 | { |
2046 | if (!(mas->mas_flags & MA_STATE_BULK)) |
2047 | return; |
2048 | |
2049 | if (mte_is_root(node: mas->node)) |
2050 | return; |
2051 | |
2052 | if (end > mt_min_slots[mt]) { |
2053 | mas->mas_flags &= ~MA_STATE_REBALANCE; |
2054 | return; |
2055 | } |
2056 | } |
2057 | |
2058 | /* |
2059 | * mas_store_b_node() - Store an @entry into the b_node while also copying the |
2060 | * data from a maple encoded node. |
2061 | * @wr_mas: the maple write state |
2062 | * @b_node: the maple_big_node to fill with data |
2063 | * @offset_end: the offset to end copying |
2064 | * |
2065 | * Return: The actual end of the data stored in @b_node |
2066 | */ |
2067 | static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, |
2068 | struct maple_big_node *b_node, unsigned char offset_end) |
2069 | { |
2070 | unsigned char slot; |
2071 | unsigned char b_end; |
2072 | /* Possible underflow of piv will wrap back to 0 before use. */ |
2073 | unsigned long piv; |
2074 | struct ma_state *mas = wr_mas->mas; |
2075 | |
2076 | b_node->type = wr_mas->type; |
2077 | b_end = 0; |
2078 | slot = mas->offset; |
2079 | if (slot) { |
2080 | /* Copy start data up to insert. */ |
2081 | mas_mab_cp(mas, mas_start: 0, mas_end: slot - 1, b_node, mab_start: 0); |
2082 | b_end = b_node->b_end; |
2083 | piv = b_node->pivot[b_end - 1]; |
2084 | } else |
2085 | piv = mas->min - 1; |
2086 | |
2087 | if (piv + 1 < mas->index) { |
2088 | /* Handle range starting after old range */ |
2089 | b_node->slot[b_end] = wr_mas->content; |
2090 | if (!wr_mas->content) |
2091 | b_node->gap[b_end] = mas->index - 1 - piv; |
2092 | b_node->pivot[b_end++] = mas->index - 1; |
2093 | } |
2094 | |
2095 | /* Store the new entry. */ |
2096 | mas->offset = b_end; |
2097 | b_node->slot[b_end] = wr_mas->entry; |
2098 | b_node->pivot[b_end] = mas->last; |
2099 | |
2100 | /* Appended. */ |
2101 | if (mas->last >= mas->max) |
2102 | goto b_end; |
2103 | |
2104 | /* Handle new range ending before old range ends */ |
2105 | piv = mas_safe_pivot(mas, pivots: wr_mas->pivots, piv: offset_end, type: wr_mas->type); |
2106 | if (piv > mas->last) { |
2107 | if (piv == ULONG_MAX) |
2108 | mas_bulk_rebalance(mas, end: b_node->b_end, mt: wr_mas->type); |
2109 | |
2110 | if (offset_end != slot) |
2111 | wr_mas->content = mas_slot_locked(mas, slots: wr_mas->slots, |
2112 | offset: offset_end); |
2113 | |
2114 | b_node->slot[++b_end] = wr_mas->content; |
2115 | if (!wr_mas->content) |
2116 | b_node->gap[b_end] = piv - mas->last + 1; |
2117 | b_node->pivot[b_end] = piv; |
2118 | } |
2119 | |
2120 | slot = offset_end + 1; |
2121 | if (slot > mas->end) |
2122 | goto b_end; |
2123 | |
2124 | /* Copy end data to the end of the node. */ |
2125 | mas_mab_cp(mas, mas_start: slot, mas_end: mas->end + 1, b_node, mab_start: ++b_end); |
2126 | b_node->b_end--; |
2127 | return; |
2128 | |
2129 | b_end: |
2130 | b_node->b_end = b_end; |
2131 | } |
2132 | |
2133 | /* |
2134 | * mas_prev_sibling() - Find the previous node with the same parent. |
2135 | * @mas: the maple state |
2136 | * |
2137 | * Return: True if there is a previous sibling, false otherwise. |
2138 | */ |
2139 | static inline bool mas_prev_sibling(struct ma_state *mas) |
2140 | { |
2141 | unsigned int p_slot = mte_parent_slot(enode: mas->node); |
2142 | |
2143 | if (mte_is_root(node: mas->node)) |
2144 | return false; |
2145 | |
2146 | if (!p_slot) |
2147 | return false; |
2148 | |
2149 | mas_ascend(mas); |
2150 | mas->offset = p_slot - 1; |
2151 | mas_descend(mas); |
2152 | return true; |
2153 | } |
2154 | |
2155 | /* |
2156 | * mas_next_sibling() - Find the next node with the same parent. |
2157 | * @mas: the maple state |
2158 | * |
2159 | * Return: true if there is a next sibling, false otherwise. |
2160 | */ |
2161 | static inline bool mas_next_sibling(struct ma_state *mas) |
2162 | { |
2163 | MA_STATE(parent, mas->tree, mas->index, mas->last); |
2164 | |
2165 | if (mte_is_root(node: mas->node)) |
2166 | return false; |
2167 | |
2168 | parent = *mas; |
2169 | mas_ascend(mas: &parent); |
2170 | parent.offset = mte_parent_slot(enode: mas->node) + 1; |
2171 | if (parent.offset > mas_data_end(mas: &parent)) |
2172 | return false; |
2173 | |
2174 | *mas = parent; |
2175 | mas_descend(mas); |
2176 | return true; |
2177 | } |
2178 | |
2179 | /* |
2180 | * mte_node_or_none() - Set the enode and state. |
2181 | * @enode: The encoded maple node. |
2182 | * |
2183 | * Set the node to the enode and the status. |
2184 | */ |
2185 | static inline void mas_node_or_none(struct ma_state *mas, |
2186 | struct maple_enode *enode) |
2187 | { |
2188 | if (enode) { |
2189 | mas->node = enode; |
2190 | mas->status = ma_active; |
2191 | } else { |
2192 | mas->node = NULL; |
2193 | mas->status = ma_none; |
2194 | } |
2195 | } |
2196 | |
2197 | /* |
2198 | * mas_wr_node_walk() - Find the correct offset for the index in the @mas. |
2199 | * @wr_mas: The maple write state |
2200 | * |
2201 | * Uses mas_slot_locked() and does not need to worry about dead nodes. |
2202 | */ |
2203 | static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) |
2204 | { |
2205 | struct ma_state *mas = wr_mas->mas; |
2206 | unsigned char count, offset; |
2207 | |
2208 | if (unlikely(ma_is_dense(wr_mas->type))) { |
2209 | wr_mas->r_max = wr_mas->r_min = mas->index; |
2210 | mas->offset = mas->index = mas->min; |
2211 | return; |
2212 | } |
2213 | |
2214 | wr_mas->node = mas_mn(mas: wr_mas->mas); |
2215 | wr_mas->pivots = ma_pivots(node: wr_mas->node, type: wr_mas->type); |
2216 | count = mas->end = ma_data_end(node: wr_mas->node, type: wr_mas->type, |
2217 | pivots: wr_mas->pivots, max: mas->max); |
2218 | offset = mas->offset; |
2219 | |
2220 | while (offset < count && mas->index > wr_mas->pivots[offset]) |
2221 | offset++; |
2222 | |
2223 | wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max; |
2224 | wr_mas->r_min = mas_safe_min(mas, pivots: wr_mas->pivots, offset); |
2225 | wr_mas->offset_end = mas->offset = offset; |
2226 | } |
2227 | |
2228 | /* |
2229 | * mast_rebalance_next() - Rebalance against the next node |
2230 | * @mast: The maple subtree state |
2231 | * @old_r: The encoded maple node to the right (next node). |
2232 | */ |
2233 | static inline void mast_rebalance_next(struct maple_subtree_state *mast) |
2234 | { |
2235 | unsigned char b_end = mast->bn->b_end; |
2236 | |
2237 | mas_mab_cp(mas: mast->orig_r, mas_start: 0, mt_slot_count(mast->orig_r->node), |
2238 | b_node: mast->bn, mab_start: b_end); |
2239 | mast->orig_r->last = mast->orig_r->max; |
2240 | } |
2241 | |
2242 | /* |
2243 | * mast_rebalance_prev() - Rebalance against the previous node |
2244 | * @mast: The maple subtree state |
2245 | * @old_l: The encoded maple node to the left (previous node) |
2246 | */ |
2247 | static inline void mast_rebalance_prev(struct maple_subtree_state *mast) |
2248 | { |
2249 | unsigned char end = mas_data_end(mas: mast->orig_l) + 1; |
2250 | unsigned char b_end = mast->bn->b_end; |
2251 | |
2252 | mab_shift_right(b_node: mast->bn, shift: end); |
2253 | mas_mab_cp(mas: mast->orig_l, mas_start: 0, mas_end: end - 1, b_node: mast->bn, mab_start: 0); |
2254 | mast->l->min = mast->orig_l->min; |
2255 | mast->orig_l->index = mast->orig_l->min; |
2256 | mast->bn->b_end = end + b_end; |
2257 | mast->l->offset += end; |
2258 | } |
2259 | |
2260 | /* |
2261 | * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring |
2262 | * the node to the right. Checking the nodes to the right then the left at each |
2263 | * level upwards until root is reached. |
2264 | * Data is copied into the @mast->bn. |
2265 | * @mast: The maple_subtree_state. |
2266 | */ |
2267 | static inline |
2268 | bool mast_spanning_rebalance(struct maple_subtree_state *mast) |
2269 | { |
2270 | struct ma_state r_tmp = *mast->orig_r; |
2271 | struct ma_state l_tmp = *mast->orig_l; |
2272 | unsigned char depth = 0; |
2273 | |
2274 | do { |
2275 | mas_ascend(mas: mast->orig_r); |
2276 | mas_ascend(mas: mast->orig_l); |
2277 | depth++; |
2278 | if (mast->orig_r->offset < mas_data_end(mas: mast->orig_r)) { |
2279 | mast->orig_r->offset++; |
2280 | do { |
2281 | mas_descend(mas: mast->orig_r); |
2282 | mast->orig_r->offset = 0; |
2283 | } while (--depth); |
2284 | |
2285 | mast_rebalance_next(mast); |
2286 | *mast->orig_l = l_tmp; |
2287 | return true; |
2288 | } else if (mast->orig_l->offset != 0) { |
2289 | mast->orig_l->offset--; |
2290 | do { |
2291 | mas_descend(mas: mast->orig_l); |
2292 | mast->orig_l->offset = |
2293 | mas_data_end(mas: mast->orig_l); |
2294 | } while (--depth); |
2295 | |
2296 | mast_rebalance_prev(mast); |
2297 | *mast->orig_r = r_tmp; |
2298 | return true; |
2299 | } |
2300 | } while (!mte_is_root(node: mast->orig_r->node)); |
2301 | |
2302 | *mast->orig_r = r_tmp; |
2303 | *mast->orig_l = l_tmp; |
2304 | return false; |
2305 | } |
2306 | |
2307 | /* |
2308 | * mast_ascend() - Ascend the original left and right maple states. |
2309 | * @mast: the maple subtree state. |
2310 | * |
2311 | * Ascend the original left and right sides. Set the offsets to point to the |
2312 | * data already in the new tree (@mast->l and @mast->r). |
2313 | */ |
2314 | static inline void mast_ascend(struct maple_subtree_state *mast) |
2315 | { |
2316 | MA_WR_STATE(wr_mas, mast->orig_r, NULL); |
2317 | mas_ascend(mas: mast->orig_l); |
2318 | mas_ascend(mas: mast->orig_r); |
2319 | |
2320 | mast->orig_r->offset = 0; |
2321 | mast->orig_r->index = mast->r->max; |
2322 | /* last should be larger than or equal to index */ |
2323 | if (mast->orig_r->last < mast->orig_r->index) |
2324 | mast->orig_r->last = mast->orig_r->index; |
2325 | |
2326 | wr_mas.type = mte_node_type(entry: mast->orig_r->node); |
2327 | mas_wr_node_walk(wr_mas: &wr_mas); |
2328 | /* Set up the left side of things */ |
2329 | mast->orig_l->offset = 0; |
2330 | mast->orig_l->index = mast->l->min; |
2331 | wr_mas.mas = mast->orig_l; |
2332 | wr_mas.type = mte_node_type(entry: mast->orig_l->node); |
2333 | mas_wr_node_walk(wr_mas: &wr_mas); |
2334 | |
2335 | mast->bn->type = wr_mas.type; |
2336 | } |
2337 | |
2338 | /* |
2339 | * mas_new_ma_node() - Create and return a new maple node. Helper function. |
2340 | * @mas: the maple state with the allocations. |
2341 | * @b_node: the maple_big_node with the type encoding. |
2342 | * |
2343 | * Use the node type from the maple_big_node to allocate a new node from the |
2344 | * ma_state. This function exists mainly for code readability. |
2345 | * |
2346 | * Return: A new maple encoded node |
2347 | */ |
2348 | static inline struct maple_enode |
2349 | *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) |
2350 | { |
2351 | return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), type: b_node->type); |
2352 | } |
2353 | |
2354 | /* |
2355 | * mas_mab_to_node() - Set up right and middle nodes |
2356 | * |
2357 | * @mas: the maple state that contains the allocations. |
2358 | * @b_node: the node which contains the data. |
2359 | * @left: The pointer which will have the left node |
2360 | * @right: The pointer which may have the right node |
2361 | * @middle: the pointer which may have the middle node (rare) |
2362 | * @mid_split: the split location for the middle node |
2363 | * |
2364 | * Return: the split of left. |
2365 | */ |
2366 | static inline unsigned char mas_mab_to_node(struct ma_state *mas, |
2367 | struct maple_big_node *b_node, struct maple_enode **left, |
2368 | struct maple_enode **right, struct maple_enode **middle, |
2369 | unsigned char *mid_split, unsigned long min) |
2370 | { |
2371 | unsigned char split = 0; |
2372 | unsigned char slot_count = mt_slots[b_node->type]; |
2373 | |
2374 | *left = mas_new_ma_node(mas, b_node); |
2375 | *right = NULL; |
2376 | *middle = NULL; |
2377 | *mid_split = 0; |
2378 | |
2379 | if (b_node->b_end < slot_count) { |
2380 | split = b_node->b_end; |
2381 | } else { |
2382 | split = mab_calc_split(mas, bn: b_node, mid_split, min); |
2383 | *right = mas_new_ma_node(mas, b_node); |
2384 | } |
2385 | |
2386 | if (*mid_split) |
2387 | *middle = mas_new_ma_node(mas, b_node); |
2388 | |
2389 | return split; |
2390 | |
2391 | } |
2392 | |
2393 | /* |
2394 | * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end |
2395 | * pointer. |
2396 | * @b_node - the big node to add the entry |
2397 | * @mas - the maple state to get the pivot (mas->max) |
2398 | * @entry - the entry to add, if NULL nothing happens. |
2399 | */ |
2400 | static inline void mab_set_b_end(struct maple_big_node *b_node, |
2401 | struct ma_state *mas, |
2402 | void *entry) |
2403 | { |
2404 | if (!entry) |
2405 | return; |
2406 | |
2407 | b_node->slot[b_node->b_end] = entry; |
2408 | if (mt_is_alloc(mt: mas->tree)) |
2409 | b_node->gap[b_node->b_end] = mas_max_gap(mas); |
2410 | b_node->pivot[b_node->b_end++] = mas->max; |
2411 | } |
2412 | |
2413 | /* |
2414 | * mas_set_split_parent() - combine_then_separate helper function. Sets the parent |
2415 | * of @mas->node to either @left or @right, depending on @slot and @split |
2416 | * |
2417 | * @mas - the maple state with the node that needs a parent |
2418 | * @left - possible parent 1 |
2419 | * @right - possible parent 2 |
2420 | * @slot - the slot the mas->node was placed |
2421 | * @split - the split location between @left and @right |
2422 | */ |
2423 | static inline void mas_set_split_parent(struct ma_state *mas, |
2424 | struct maple_enode *left, |
2425 | struct maple_enode *right, |
2426 | unsigned char *slot, unsigned char split) |
2427 | { |
2428 | if (mas_is_none(mas)) |
2429 | return; |
2430 | |
2431 | if ((*slot) <= split) |
2432 | mas_set_parent(mas, enode: mas->node, parent: left, slot: *slot); |
2433 | else if (right) |
2434 | mas_set_parent(mas, enode: mas->node, parent: right, slot: (*slot) - split - 1); |
2435 | |
2436 | (*slot)++; |
2437 | } |
2438 | |
2439 | /* |
2440 | * mte_mid_split_check() - Check if the next node passes the mid-split |
2441 | * @**l: Pointer to left encoded maple node. |
2442 | * @**m: Pointer to middle encoded maple node. |
2443 | * @**r: Pointer to right encoded maple node. |
2444 | * @slot: The offset |
2445 | * @*split: The split location. |
2446 | * @mid_split: The middle split. |
2447 | */ |
2448 | static inline void mte_mid_split_check(struct maple_enode **l, |
2449 | struct maple_enode **r, |
2450 | struct maple_enode *right, |
2451 | unsigned char slot, |
2452 | unsigned char *split, |
2453 | unsigned char mid_split) |
2454 | { |
2455 | if (*r == right) |
2456 | return; |
2457 | |
2458 | if (slot < mid_split) |
2459 | return; |
2460 | |
2461 | *l = *r; |
2462 | *r = right; |
2463 | *split = mid_split; |
2464 | } |
2465 | |
2466 | /* |
2467 | * mast_set_split_parents() - Helper function to set three nodes parents. Slot |
2468 | * is taken from @mast->l. |
2469 | * @mast - the maple subtree state |
2470 | * @left - the left node |
2471 | * @right - the right node |
2472 | * @split - the split location. |
2473 | */ |
2474 | static inline void mast_set_split_parents(struct maple_subtree_state *mast, |
2475 | struct maple_enode *left, |
2476 | struct maple_enode *middle, |
2477 | struct maple_enode *right, |
2478 | unsigned char split, |
2479 | unsigned char mid_split) |
2480 | { |
2481 | unsigned char slot; |
2482 | struct maple_enode *l = left; |
2483 | struct maple_enode *r = right; |
2484 | |
2485 | if (mas_is_none(mas: mast->l)) |
2486 | return; |
2487 | |
2488 | if (middle) |
2489 | r = middle; |
2490 | |
2491 | slot = mast->l->offset; |
2492 | |
2493 | mte_mid_split_check(l: &l, r: &r, right, slot, split: &split, mid_split); |
2494 | mas_set_split_parent(mas: mast->l, left: l, right: r, slot: &slot, split); |
2495 | |
2496 | mte_mid_split_check(l: &l, r: &r, right, slot, split: &split, mid_split); |
2497 | mas_set_split_parent(mas: mast->m, left: l, right: r, slot: &slot, split); |
2498 | |
2499 | mte_mid_split_check(l: &l, r: &r, right, slot, split: &split, mid_split); |
2500 | mas_set_split_parent(mas: mast->r, left: l, right: r, slot: &slot, split); |
2501 | } |
2502 | |
2503 | /* |
2504 | * mas_topiary_node() - Dispose of a single node |
2505 | * @mas: The maple state for pushing nodes |
2506 | * @enode: The encoded maple node |
2507 | * @in_rcu: If the tree is in rcu mode |
2508 | * |
2509 | * The node will either be RCU freed or pushed back on the maple state. |
2510 | */ |
2511 | static inline void mas_topiary_node(struct ma_state *mas, |
2512 | struct ma_state *tmp_mas, bool in_rcu) |
2513 | { |
2514 | struct maple_node *tmp; |
2515 | struct maple_enode *enode; |
2516 | |
2517 | if (mas_is_none(mas: tmp_mas)) |
2518 | return; |
2519 | |
2520 | enode = tmp_mas->node; |
2521 | tmp = mte_to_node(entry: enode); |
2522 | mte_set_node_dead(mn: enode); |
2523 | if (in_rcu) |
2524 | ma_free_rcu(node: tmp); |
2525 | else |
2526 | mas_push_node(mas, used: tmp); |
2527 | } |
2528 | |
2529 | /* |
2530 | * mas_topiary_replace() - Replace the data with new data, then repair the |
2531 | * parent links within the new tree. Iterate over the dead sub-tree and collect |
2532 | * the dead subtrees and topiary the nodes that are no longer of use. |
2533 | * |
2534 | * The new tree will have up to three children with the correct parent. Keep |
2535 | * track of the new entries as they need to be followed to find the next level |
2536 | * of new entries. |
2537 | * |
2538 | * The old tree will have up to three children with the old parent. Keep track |
2539 | * of the old entries as they may have more nodes below replaced. Nodes within |
2540 | * [index, last] are dead subtrees, others need to be freed and followed. |
2541 | * |
2542 | * @mas: The maple state pointing at the new data |
2543 | * @old_enode: The maple encoded node being replaced |
2544 | * |
2545 | */ |
2546 | static inline void mas_topiary_replace(struct ma_state *mas, |
2547 | struct maple_enode *old_enode) |
2548 | { |
2549 | struct ma_state tmp[3], tmp_next[3]; |
2550 | MA_TOPIARY(subtrees, mas->tree); |
2551 | bool in_rcu; |
2552 | int i, n; |
2553 | |
2554 | /* Place data in tree & then mark node as old */ |
2555 | mas_put_in_tree(mas, old_enode); |
2556 | |
2557 | /* Update the parent pointers in the tree */ |
2558 | tmp[0] = *mas; |
2559 | tmp[0].offset = 0; |
2560 | tmp[1].status = ma_none; |
2561 | tmp[2].status = ma_none; |
2562 | while (!mte_is_leaf(entry: tmp[0].node)) { |
2563 | n = 0; |
2564 | for (i = 0; i < 3; i++) { |
2565 | if (mas_is_none(mas: &tmp[i])) |
2566 | continue; |
2567 | |
2568 | while (n < 3) { |
2569 | if (!mas_find_child(mas: &tmp[i], child: &tmp_next[n])) |
2570 | break; |
2571 | n++; |
2572 | } |
2573 | |
2574 | mas_adopt_children(mas: &tmp[i], parent: tmp[i].node); |
2575 | } |
2576 | |
2577 | if (MAS_WARN_ON(mas, n == 0)) |
2578 | break; |
2579 | |
2580 | while (n < 3) |
2581 | tmp_next[n++].status = ma_none; |
2582 | |
2583 | for (i = 0; i < 3; i++) |
2584 | tmp[i] = tmp_next[i]; |
2585 | } |
2586 | |
2587 | /* Collect the old nodes that need to be discarded */ |
2588 | if (mte_is_leaf(entry: old_enode)) |
2589 | return mas_free(mas, used: old_enode); |
2590 | |
2591 | tmp[0] = *mas; |
2592 | tmp[0].offset = 0; |
2593 | tmp[0].node = old_enode; |
2594 | tmp[1].status = ma_none; |
2595 | tmp[2].status = ma_none; |
2596 | in_rcu = mt_in_rcu(mt: mas->tree); |
2597 | do { |
2598 | n = 0; |
2599 | for (i = 0; i < 3; i++) { |
2600 | if (mas_is_none(mas: &tmp[i])) |
2601 | continue; |
2602 | |
2603 | while (n < 3) { |
2604 | if (!mas_find_child(mas: &tmp[i], child: &tmp_next[n])) |
2605 | break; |
2606 | |
2607 | if ((tmp_next[n].min >= tmp_next->index) && |
2608 | (tmp_next[n].max <= tmp_next->last)) { |
2609 | mat_add(mat: &subtrees, dead_enode: tmp_next[n].node); |
2610 | tmp_next[n].status = ma_none; |
2611 | } else { |
2612 | n++; |
2613 | } |
2614 | } |
2615 | } |
2616 | |
2617 | if (MAS_WARN_ON(mas, n == 0)) |
2618 | break; |
2619 | |
2620 | while (n < 3) |
2621 | tmp_next[n++].status = ma_none; |
2622 | |
2623 | for (i = 0; i < 3; i++) { |
2624 | mas_topiary_node(mas, tmp_mas: &tmp[i], in_rcu); |
2625 | tmp[i] = tmp_next[i]; |
2626 | } |
2627 | } while (!mte_is_leaf(entry: tmp[0].node)); |
2628 | |
2629 | for (i = 0; i < 3; i++) |
2630 | mas_topiary_node(mas, tmp_mas: &tmp[i], in_rcu); |
2631 | |
2632 | mas_mat_destroy(mas, mat: &subtrees); |
2633 | } |
2634 | |
2635 | /* |
2636 | * mas_wmb_replace() - Write memory barrier and replace |
2637 | * @mas: The maple state |
2638 | * @old: The old maple encoded node that is being replaced. |
2639 | * |
2640 | * Updates gap as necessary. |
2641 | */ |
2642 | static inline void mas_wmb_replace(struct ma_state *mas, |
2643 | struct maple_enode *old_enode) |
2644 | { |
2645 | /* Insert the new data in the tree */ |
2646 | mas_topiary_replace(mas, old_enode); |
2647 | |
2648 | if (mte_is_leaf(entry: mas->node)) |
2649 | return; |
2650 | |
2651 | mas_update_gap(mas); |
2652 | } |
2653 | |
2654 | /* |
2655 | * mast_cp_to_nodes() - Copy data out to nodes. |
2656 | * @mast: The maple subtree state |
2657 | * @left: The left encoded maple node |
2658 | * @middle: The middle encoded maple node |
2659 | * @right: The right encoded maple node |
2660 | * @split: The location to split between left and (middle ? middle : right) |
2661 | * @mid_split: The location to split between middle and right. |
2662 | */ |
2663 | static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, |
2664 | struct maple_enode *left, struct maple_enode *middle, |
2665 | struct maple_enode *right, unsigned char split, unsigned char mid_split) |
2666 | { |
2667 | bool new_lmax = true; |
2668 | |
2669 | mas_node_or_none(mas: mast->l, enode: left); |
2670 | mas_node_or_none(mas: mast->m, enode: middle); |
2671 | mas_node_or_none(mas: mast->r, enode: right); |
2672 | |
2673 | mast->l->min = mast->orig_l->min; |
2674 | if (split == mast->bn->b_end) { |
2675 | mast->l->max = mast->orig_r->max; |
2676 | new_lmax = false; |
2677 | } |
2678 | |
2679 | mab_mas_cp(b_node: mast->bn, mab_start: 0, mab_end: split, mas: mast->l, new_max: new_lmax); |
2680 | |
2681 | if (middle) { |
2682 | mab_mas_cp(b_node: mast->bn, mab_start: 1 + split, mab_end: mid_split, mas: mast->m, new_max: true); |
2683 | mast->m->min = mast->bn->pivot[split] + 1; |
2684 | split = mid_split; |
2685 | } |
2686 | |
2687 | mast->r->max = mast->orig_r->max; |
2688 | if (right) { |
2689 | mab_mas_cp(b_node: mast->bn, mab_start: 1 + split, mab_end: mast->bn->b_end, mas: mast->r, new_max: false); |
2690 | mast->r->min = mast->bn->pivot[split] + 1; |
2691 | } |
2692 | } |
2693 | |
2694 | /* |
2695 | * mast_combine_cp_left - Copy in the original left side of the tree into the |
2696 | * combined data set in the maple subtree state big node. |
2697 | * @mast: The maple subtree state |
2698 | */ |
2699 | static inline void mast_combine_cp_left(struct maple_subtree_state *mast) |
2700 | { |
2701 | unsigned char l_slot = mast->orig_l->offset; |
2702 | |
2703 | if (!l_slot) |
2704 | return; |
2705 | |
2706 | mas_mab_cp(mas: mast->orig_l, mas_start: 0, mas_end: l_slot - 1, b_node: mast->bn, mab_start: 0); |
2707 | } |
2708 | |
2709 | /* |
2710 | * mast_combine_cp_right: Copy in the original right side of the tree into the |
2711 | * combined data set in the maple subtree state big node. |
2712 | * @mast: The maple subtree state |
2713 | */ |
2714 | static inline void mast_combine_cp_right(struct maple_subtree_state *mast) |
2715 | { |
2716 | if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) |
2717 | return; |
2718 | |
2719 | mas_mab_cp(mas: mast->orig_r, mas_start: mast->orig_r->offset + 1, |
2720 | mt_slot_count(mast->orig_r->node), b_node: mast->bn, |
2721 | mab_start: mast->bn->b_end); |
2722 | mast->orig_r->last = mast->orig_r->max; |
2723 | } |
2724 | |
2725 | /* |
2726 | * mast_sufficient: Check if the maple subtree state has enough data in the big |
2727 | * node to create at least one sufficient node |
2728 | * @mast: the maple subtree state |
2729 | */ |
2730 | static inline bool mast_sufficient(struct maple_subtree_state *mast) |
2731 | { |
2732 | if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) |
2733 | return true; |
2734 | |
2735 | return false; |
2736 | } |
2737 | |
2738 | /* |
2739 | * mast_overflow: Check if there is too much data in the subtree state for a |
2740 | * single node. |
2741 | * @mast: The maple subtree state |
2742 | */ |
2743 | static inline bool mast_overflow(struct maple_subtree_state *mast) |
2744 | { |
2745 | if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) |
2746 | return true; |
2747 | |
2748 | return false; |
2749 | } |
2750 | |
2751 | static inline void *mtree_range_walk(struct ma_state *mas) |
2752 | { |
2753 | unsigned long *pivots; |
2754 | unsigned char offset; |
2755 | struct maple_node *node; |
2756 | struct maple_enode *next, *last; |
2757 | enum maple_type type; |
2758 | void __rcu **slots; |
2759 | unsigned char end; |
2760 | unsigned long max, min; |
2761 | unsigned long prev_max, prev_min; |
2762 | |
2763 | next = mas->node; |
2764 | min = mas->min; |
2765 | max = mas->max; |
2766 | do { |
2767 | last = next; |
2768 | node = mte_to_node(entry: next); |
2769 | type = mte_node_type(entry: next); |
2770 | pivots = ma_pivots(node, type); |
2771 | end = ma_data_end(node, type, pivots, max); |
2772 | prev_min = min; |
2773 | prev_max = max; |
2774 | if (pivots[0] >= mas->index) { |
2775 | offset = 0; |
2776 | max = pivots[0]; |
2777 | goto next; |
2778 | } |
2779 | |
2780 | offset = 1; |
2781 | while (offset < end) { |
2782 | if (pivots[offset] >= mas->index) { |
2783 | max = pivots[offset]; |
2784 | break; |
2785 | } |
2786 | offset++; |
2787 | } |
2788 | |
2789 | min = pivots[offset - 1] + 1; |
2790 | next: |
2791 | slots = ma_slots(mn: node, mt: type); |
2792 | next = mt_slot(mt: mas->tree, slots, offset); |
2793 | if (unlikely(ma_dead_node(node))) |
2794 | goto dead_node; |
2795 | } while (!ma_is_leaf(type)); |
2796 | |
2797 | mas->end = end; |
2798 | mas->offset = offset; |
2799 | mas->index = min; |
2800 | mas->last = max; |
2801 | mas->min = prev_min; |
2802 | mas->max = prev_max; |
2803 | mas->node = last; |
2804 | return (void *)next; |
2805 | |
2806 | dead_node: |
2807 | mas_reset(mas); |
2808 | return NULL; |
2809 | } |
2810 | |
2811 | /* |
2812 | * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. |
2813 | * @mas: The starting maple state |
2814 | * @mast: The maple_subtree_state, keeps track of 4 maple states. |
2815 | * @count: The estimated count of iterations needed. |
2816 | * |
2817 | * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root |
2818 | * is hit. First @b_node is split into two entries which are inserted into the |
2819 | * next iteration of the loop. @b_node is returned populated with the final |
2820 | * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the |
2821 | * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last |
2822 | * to account of what has been copied into the new sub-tree. The update of |
2823 | * orig_l_mas->last is used in mas_consume to find the slots that will need to |
2824 | * be either freed or destroyed. orig_l_mas->depth keeps track of the height of |
2825 | * the new sub-tree in case the sub-tree becomes the full tree. |
2826 | * |
2827 | * Return: the number of elements in b_node during the last loop. |
2828 | */ |
2829 | static int mas_spanning_rebalance(struct ma_state *mas, |
2830 | struct maple_subtree_state *mast, unsigned char count) |
2831 | { |
2832 | unsigned char split, mid_split; |
2833 | unsigned char slot = 0; |
2834 | struct maple_enode *left = NULL, *middle = NULL, *right = NULL; |
2835 | struct maple_enode *old_enode; |
2836 | |
2837 | MA_STATE(l_mas, mas->tree, mas->index, mas->index); |
2838 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); |
2839 | MA_STATE(m_mas, mas->tree, mas->index, mas->index); |
2840 | |
2841 | /* |
2842 | * The tree needs to be rebalanced and leaves need to be kept at the same level. |
2843 | * Rebalancing is done by use of the ``struct maple_topiary``. |
2844 | */ |
2845 | mast->l = &l_mas; |
2846 | mast->m = &m_mas; |
2847 | mast->r = &r_mas; |
2848 | l_mas.status = r_mas.status = m_mas.status = ma_none; |
2849 | |
2850 | /* Check if this is not root and has sufficient data. */ |
2851 | if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && |
2852 | unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) |
2853 | mast_spanning_rebalance(mast); |
2854 | |
2855 | l_mas.depth = 0; |
2856 | |
2857 | /* |
2858 | * Each level of the tree is examined and balanced, pushing data to the left or |
2859 | * right, or rebalancing against left or right nodes is employed to avoid |
2860 | * rippling up the tree to limit the amount of churn. Once a new sub-section of |
2861 | * the tree is created, there may be a mix of new and old nodes. The old nodes |
2862 | * will have the incorrect parent pointers and currently be in two trees: the |
2863 | * original tree and the partially new tree. To remedy the parent pointers in |
2864 | * the old tree, the new data is swapped into the active tree and a walk down |
2865 | * the tree is performed and the parent pointers are updated. |
2866 | * See mas_topiary_replace() for more information. |
2867 | */ |
2868 | while (count--) { |
2869 | mast->bn->b_end--; |
2870 | mast->bn->type = mte_node_type(entry: mast->orig_l->node); |
2871 | split = mas_mab_to_node(mas, b_node: mast->bn, left: &left, right: &right, middle: &middle, |
2872 | mid_split: &mid_split, min: mast->orig_l->min); |
2873 | mast_set_split_parents(mast, left, middle, right, split, |
2874 | mid_split); |
2875 | mast_cp_to_nodes(mast, left, middle, right, split, mid_split); |
2876 | |
2877 | /* |
2878 | * Copy data from next level in the tree to mast->bn from next |
2879 | * iteration |
2880 | */ |
2881 | memset(mast->bn, 0, sizeof(struct maple_big_node)); |
2882 | mast->bn->type = mte_node_type(entry: left); |
2883 | l_mas.depth++; |
2884 | |
2885 | /* Root already stored in l->node. */ |
2886 | if (mas_is_root_limits(mas: mast->l)) |
2887 | goto new_root; |
2888 | |
2889 | mast_ascend(mast); |
2890 | mast_combine_cp_left(mast); |
2891 | l_mas.offset = mast->bn->b_end; |
2892 | mab_set_b_end(b_node: mast->bn, mas: &l_mas, entry: left); |
2893 | mab_set_b_end(b_node: mast->bn, mas: &m_mas, entry: middle); |
2894 | mab_set_b_end(b_node: mast->bn, mas: &r_mas, entry: right); |
2895 | |
2896 | /* Copy anything necessary out of the right node. */ |
2897 | mast_combine_cp_right(mast); |
2898 | mast->orig_l->last = mast->orig_l->max; |
2899 | |
2900 | if (mast_sufficient(mast)) |
2901 | continue; |
2902 | |
2903 | if (mast_overflow(mast)) |
2904 | continue; |
2905 | |
2906 | /* May be a new root stored in mast->bn */ |
2907 | if (mas_is_root_limits(mas: mast->orig_l)) |
2908 | break; |
2909 | |
2910 | mast_spanning_rebalance(mast); |
2911 | |
2912 | /* rebalancing from other nodes may require another loop. */ |
2913 | if (!count) |
2914 | count++; |
2915 | } |
2916 | |
2917 | l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), |
2918 | type: mte_node_type(entry: mast->orig_l->node)); |
2919 | l_mas.depth++; |
2920 | mab_mas_cp(b_node: mast->bn, mab_start: 0, mab_end: mt_slots[mast->bn->type] - 1, mas: &l_mas, new_max: true); |
2921 | mas_set_parent(mas, enode: left, parent: l_mas.node, slot); |
2922 | if (middle) |
2923 | mas_set_parent(mas, enode: middle, parent: l_mas.node, slot: ++slot); |
2924 | |
2925 | if (right) |
2926 | mas_set_parent(mas, enode: right, parent: l_mas.node, slot: ++slot); |
2927 | |
2928 | if (mas_is_root_limits(mas: mast->l)) { |
2929 | new_root: |
2930 | mas_mn(mas: mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas)); |
2931 | while (!mte_is_root(node: mast->orig_l->node)) |
2932 | mast_ascend(mast); |
2933 | } else { |
2934 | mas_mn(mas: &l_mas)->parent = mas_mn(mas: mast->orig_l)->parent; |
2935 | } |
2936 | |
2937 | old_enode = mast->orig_l->node; |
2938 | mas->depth = l_mas.depth; |
2939 | mas->node = l_mas.node; |
2940 | mas->min = l_mas.min; |
2941 | mas->max = l_mas.max; |
2942 | mas->offset = l_mas.offset; |
2943 | mas_wmb_replace(mas, old_enode); |
2944 | mtree_range_walk(mas); |
2945 | return mast->bn->b_end; |
2946 | } |
2947 | |
2948 | /* |
2949 | * mas_rebalance() - Rebalance a given node. |
2950 | * @mas: The maple state |
2951 | * @b_node: The big maple node. |
2952 | * |
2953 | * Rebalance two nodes into a single node or two new nodes that are sufficient. |
2954 | * Continue upwards until tree is sufficient. |
2955 | * |
2956 | * Return: the number of elements in b_node during the last loop. |
2957 | */ |
2958 | static inline int mas_rebalance(struct ma_state *mas, |
2959 | struct maple_big_node *b_node) |
2960 | { |
2961 | char empty_count = mas_mt_height(mas); |
2962 | struct maple_subtree_state mast; |
2963 | unsigned char shift, b_end = ++b_node->b_end; |
2964 | |
2965 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); |
2966 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); |
2967 | |
2968 | trace_ma_op(fn: __func__, mas); |
2969 | |
2970 | /* |
2971 | * Rebalancing occurs if a node is insufficient. Data is rebalanced |
2972 | * against the node to the right if it exists, otherwise the node to the |
2973 | * left of this node is rebalanced against this node. If rebalancing |
2974 | * causes just one node to be produced instead of two, then the parent |
2975 | * is also examined and rebalanced if it is insufficient. Every level |
2976 | * tries to combine the data in the same way. If one node contains the |
2977 | * entire range of the tree, then that node is used as a new root node. |
2978 | */ |
2979 | mas_node_count(mas, count: empty_count * 2 - 1); |
2980 | if (mas_is_err(mas)) |
2981 | return 0; |
2982 | |
2983 | mast.orig_l = &l_mas; |
2984 | mast.orig_r = &r_mas; |
2985 | mast.bn = b_node; |
2986 | mast.bn->type = mte_node_type(entry: mas->node); |
2987 | |
2988 | l_mas = r_mas = *mas; |
2989 | |
2990 | if (mas_next_sibling(mas: &r_mas)) { |
2991 | mas_mab_cp(mas: &r_mas, mas_start: 0, mt_slot_count(r_mas.node), b_node, mab_start: b_end); |
2992 | r_mas.last = r_mas.index = r_mas.max; |
2993 | } else { |
2994 | mas_prev_sibling(mas: &l_mas); |
2995 | shift = mas_data_end(mas: &l_mas) + 1; |
2996 | mab_shift_right(b_node, shift); |
2997 | mas->offset += shift; |
2998 | mas_mab_cp(mas: &l_mas, mas_start: 0, mas_end: shift - 1, b_node, mab_start: 0); |
2999 | b_node->b_end = shift + b_end; |
3000 | l_mas.index = l_mas.last = l_mas.min; |
3001 | } |
3002 | |
3003 | return mas_spanning_rebalance(mas, mast: &mast, count: empty_count); |
3004 | } |
3005 | |
3006 | /* |
3007 | * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple |
3008 | * state. |
3009 | * @mas: The maple state |
3010 | * @end: The end of the left-most node. |
3011 | * |
3012 | * During a mass-insert event (such as forking), it may be necessary to |
3013 | * rebalance the left-most node when it is not sufficient. |
3014 | */ |
3015 | static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) |
3016 | { |
3017 | enum maple_type mt = mte_node_type(entry: mas->node); |
3018 | struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; |
3019 | struct maple_enode *eparent, *old_eparent; |
3020 | unsigned char offset, tmp, split = mt_slots[mt] / 2; |
3021 | void __rcu **l_slots, **slots; |
3022 | unsigned long *l_pivs, *pivs, gap; |
3023 | bool in_rcu = mt_in_rcu(mt: mas->tree); |
3024 | |
3025 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); |
3026 | |
3027 | l_mas = *mas; |
3028 | mas_prev_sibling(mas: &l_mas); |
3029 | |
3030 | /* set up node. */ |
3031 | if (in_rcu) { |
3032 | /* Allocate for both left and right as well as parent. */ |
3033 | mas_node_count(mas, count: 3); |
3034 | if (mas_is_err(mas)) |
3035 | return; |
3036 | |
3037 | newnode = mas_pop_node(mas); |
3038 | } else { |
3039 | newnode = &reuse; |
3040 | } |
3041 | |
3042 | node = mas_mn(mas); |
3043 | newnode->parent = node->parent; |
3044 | slots = ma_slots(mn: newnode, mt); |
3045 | pivs = ma_pivots(node: newnode, type: mt); |
3046 | left = mas_mn(mas: &l_mas); |
3047 | l_slots = ma_slots(mn: left, mt); |
3048 | l_pivs = ma_pivots(node: left, type: mt); |
3049 | if (!l_slots[split]) |
3050 | split++; |
3051 | tmp = mas_data_end(mas: &l_mas) - split; |
3052 | |
3053 | memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); |
3054 | memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); |
3055 | pivs[tmp] = l_mas.max; |
3056 | memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); |
3057 | memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); |
3058 | |
3059 | l_mas.max = l_pivs[split]; |
3060 | mas->min = l_mas.max + 1; |
3061 | old_eparent = mt_mk_node(node: mte_parent(enode: l_mas.node), |
3062 | type: mas_parent_type(mas: &l_mas, enode: l_mas.node)); |
3063 | tmp += end; |
3064 | if (!in_rcu) { |
3065 | unsigned char max_p = mt_pivots[mt]; |
3066 | unsigned char max_s = mt_slots[mt]; |
3067 | |
3068 | if (tmp < max_p) |
3069 | memset(pivs + tmp, 0, |
3070 | sizeof(unsigned long) * (max_p - tmp)); |
3071 | |
3072 | if (tmp < mt_slots[mt]) |
3073 | memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); |
3074 | |
3075 | memcpy(node, newnode, sizeof(struct maple_node)); |
3076 | ma_set_meta(mn: node, mt, offset: 0, end: tmp - 1); |
3077 | mte_set_pivot(mn: old_eparent, piv: mte_parent_slot(enode: l_mas.node), |
3078 | val: l_pivs[split]); |
3079 | |
3080 | /* Remove data from l_pivs. */ |
3081 | tmp = split + 1; |
3082 | memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); |
3083 | memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); |
3084 | ma_set_meta(mn: left, mt, offset: 0, end: split); |
3085 | eparent = old_eparent; |
3086 | |
3087 | goto done; |
3088 | } |
3089 | |
3090 | /* RCU requires replacing both l_mas, mas, and parent. */ |
3091 | mas->node = mt_mk_node(node: newnode, type: mt); |
3092 | ma_set_meta(mn: newnode, mt, offset: 0, end: tmp); |
3093 | |
3094 | new_left = mas_pop_node(mas); |
3095 | new_left->parent = left->parent; |
3096 | mt = mte_node_type(entry: l_mas.node); |
3097 | slots = ma_slots(mn: new_left, mt); |
3098 | pivs = ma_pivots(node: new_left, type: mt); |
3099 | memcpy(slots, l_slots, sizeof(void *) * split); |
3100 | memcpy(pivs, l_pivs, sizeof(unsigned long) * split); |
3101 | ma_set_meta(mn: new_left, mt, offset: 0, end: split); |
3102 | l_mas.node = mt_mk_node(node: new_left, type: mt); |
3103 | |
3104 | /* replace parent. */ |
3105 | offset = mte_parent_slot(enode: mas->node); |
3106 | mt = mas_parent_type(mas: &l_mas, enode: l_mas.node); |
3107 | parent = mas_pop_node(mas); |
3108 | slots = ma_slots(mn: parent, mt); |
3109 | pivs = ma_pivots(node: parent, type: mt); |
3110 | memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node)); |
3111 | rcu_assign_pointer(slots[offset], mas->node); |
3112 | rcu_assign_pointer(slots[offset - 1], l_mas.node); |
3113 | pivs[offset - 1] = l_mas.max; |
3114 | eparent = mt_mk_node(node: parent, type: mt); |
3115 | done: |
3116 | gap = mas_leaf_max_gap(mas); |
3117 | mte_set_gap(mn: eparent, gap: mte_parent_slot(enode: mas->node), val: gap); |
3118 | gap = mas_leaf_max_gap(mas: &l_mas); |
3119 | mte_set_gap(mn: eparent, gap: mte_parent_slot(enode: l_mas.node), val: gap); |
3120 | mas_ascend(mas); |
3121 | |
3122 | if (in_rcu) { |
3123 | mas_replace_node(mas, old_enode: old_eparent); |
3124 | mas_adopt_children(mas, parent: mas->node); |
3125 | } |
3126 | |
3127 | mas_update_gap(mas); |
3128 | } |
3129 | |
3130 | /* |
3131 | * mas_split_final_node() - Split the final node in a subtree operation. |
3132 | * @mast: the maple subtree state |
3133 | * @mas: The maple state |
3134 | * @height: The height of the tree in case it's a new root. |
3135 | */ |
3136 | static inline void mas_split_final_node(struct maple_subtree_state *mast, |
3137 | struct ma_state *mas, int height) |
3138 | { |
3139 | struct maple_enode *ancestor; |
3140 | |
3141 | if (mte_is_root(node: mas->node)) { |
3142 | if (mt_is_alloc(mt: mas->tree)) |
3143 | mast->bn->type = maple_arange_64; |
3144 | else |
3145 | mast->bn->type = maple_range_64; |
3146 | mas->depth = height; |
3147 | } |
3148 | /* |
3149 | * Only a single node is used here, could be root. |
3150 | * The Big_node data should just fit in a single node. |
3151 | */ |
3152 | ancestor = mas_new_ma_node(mas, b_node: mast->bn); |
3153 | mas_set_parent(mas, enode: mast->l->node, parent: ancestor, slot: mast->l->offset); |
3154 | mas_set_parent(mas, enode: mast->r->node, parent: ancestor, slot: mast->r->offset); |
3155 | mte_to_node(entry: ancestor)->parent = mas_mn(mas)->parent; |
3156 | |
3157 | mast->l->node = ancestor; |
3158 | mab_mas_cp(b_node: mast->bn, mab_start: 0, mab_end: mt_slots[mast->bn->type] - 1, mas: mast->l, new_max: true); |
3159 | mas->offset = mast->bn->b_end - 1; |
3160 | } |
3161 | |
3162 | /* |
3163 | * mast_fill_bnode() - Copy data into the big node in the subtree state |
3164 | * @mast: The maple subtree state |
3165 | * @mas: the maple state |
3166 | * @skip: The number of entries to skip for new nodes insertion. |
3167 | */ |
3168 | static inline void mast_fill_bnode(struct maple_subtree_state *mast, |
3169 | struct ma_state *mas, |
3170 | unsigned char skip) |
3171 | { |
3172 | bool cp = true; |
3173 | unsigned char split; |
3174 | |
3175 | memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); |
3176 | memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); |
3177 | memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); |
3178 | mast->bn->b_end = 0; |
3179 | |
3180 | if (mte_is_root(node: mas->node)) { |
3181 | cp = false; |
3182 | } else { |
3183 | mas_ascend(mas); |
3184 | mas->offset = mte_parent_slot(enode: mas->node); |
3185 | } |
3186 | |
3187 | if (cp && mast->l->offset) |
3188 | mas_mab_cp(mas, mas_start: 0, mas_end: mast->l->offset - 1, b_node: mast->bn, mab_start: 0); |
3189 | |
3190 | split = mast->bn->b_end; |
3191 | mab_set_b_end(b_node: mast->bn, mas: mast->l, entry: mast->l->node); |
3192 | mast->r->offset = mast->bn->b_end; |
3193 | mab_set_b_end(b_node: mast->bn, mas: mast->r, entry: mast->r->node); |
3194 | if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) |
3195 | cp = false; |
3196 | |
3197 | if (cp) |
3198 | mas_mab_cp(mas, mas_start: split + skip, mt_slot_count(mas->node) - 1, |
3199 | b_node: mast->bn, mab_start: mast->bn->b_end); |
3200 | |
3201 | mast->bn->b_end--; |
3202 | mast->bn->type = mte_node_type(entry: mas->node); |
3203 | } |
3204 | |
3205 | /* |
3206 | * mast_split_data() - Split the data in the subtree state big node into regular |
3207 | * nodes. |
3208 | * @mast: The maple subtree state |
3209 | * @mas: The maple state |
3210 | * @split: The location to split the big node |
3211 | */ |
3212 | static inline void mast_split_data(struct maple_subtree_state *mast, |
3213 | struct ma_state *mas, unsigned char split) |
3214 | { |
3215 | unsigned char p_slot; |
3216 | |
3217 | mab_mas_cp(b_node: mast->bn, mab_start: 0, mab_end: split, mas: mast->l, new_max: true); |
3218 | mte_set_pivot(mn: mast->r->node, piv: 0, val: mast->r->max); |
3219 | mab_mas_cp(b_node: mast->bn, mab_start: split + 1, mab_end: mast->bn->b_end, mas: mast->r, new_max: false); |
3220 | mast->l->offset = mte_parent_slot(enode: mas->node); |
3221 | mast->l->max = mast->bn->pivot[split]; |
3222 | mast->r->min = mast->l->max + 1; |
3223 | if (mte_is_leaf(entry: mas->node)) |
3224 | return; |
3225 | |
3226 | p_slot = mast->orig_l->offset; |
3227 | mas_set_split_parent(mas: mast->orig_l, left: mast->l->node, right: mast->r->node, |
3228 | slot: &p_slot, split); |
3229 | mas_set_split_parent(mas: mast->orig_r, left: mast->l->node, right: mast->r->node, |
3230 | slot: &p_slot, split); |
3231 | } |
3232 | |
3233 | /* |
3234 | * mas_push_data() - Instead of splitting a node, it is beneficial to push the |
3235 | * data to the right or left node if there is room. |
3236 | * @mas: The maple state |
3237 | * @height: The current height of the maple state |
3238 | * @mast: The maple subtree state |
3239 | * @left: Push left or not. |
3240 | * |
3241 | * Keeping the height of the tree low means faster lookups. |
3242 | * |
3243 | * Return: True if pushed, false otherwise. |
3244 | */ |
3245 | static inline bool mas_push_data(struct ma_state *mas, int height, |
3246 | struct maple_subtree_state *mast, bool left) |
3247 | { |
3248 | unsigned char slot_total = mast->bn->b_end; |
3249 | unsigned char end, space, split; |
3250 | |
3251 | MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); |
3252 | tmp_mas = *mas; |
3253 | tmp_mas.depth = mast->l->depth; |
3254 | |
3255 | if (left && !mas_prev_sibling(mas: &tmp_mas)) |
3256 | return false; |
3257 | else if (!left && !mas_next_sibling(mas: &tmp_mas)) |
3258 | return false; |
3259 | |
3260 | end = mas_data_end(mas: &tmp_mas); |
3261 | slot_total += end; |
3262 | space = 2 * mt_slot_count(mas->node) - 2; |
3263 | /* -2 instead of -1 to ensure there isn't a triple split */ |
3264 | if (ma_is_leaf(type: mast->bn->type)) |
3265 | space--; |
3266 | |
3267 | if (mas->max == ULONG_MAX) |
3268 | space--; |
3269 | |
3270 | if (slot_total >= space) |
3271 | return false; |
3272 | |
3273 | /* Get the data; Fill mast->bn */ |
3274 | mast->bn->b_end++; |
3275 | if (left) { |
3276 | mab_shift_right(b_node: mast->bn, shift: end + 1); |
3277 | mas_mab_cp(mas: &tmp_mas, mas_start: 0, mas_end: end, b_node: mast->bn, mab_start: 0); |
3278 | mast->bn->b_end = slot_total + 1; |
3279 | } else { |
3280 | mas_mab_cp(mas: &tmp_mas, mas_start: 0, mas_end: end, b_node: mast->bn, mab_start: mast->bn->b_end); |
3281 | } |
3282 | |
3283 | /* Configure mast for splitting of mast->bn */ |
3284 | split = mt_slots[mast->bn->type] - 2; |
3285 | if (left) { |
3286 | /* Switch mas to prev node */ |
3287 | *mas = tmp_mas; |
3288 | /* Start using mast->l for the left side. */ |
3289 | tmp_mas.node = mast->l->node; |
3290 | *mast->l = tmp_mas; |
3291 | } else { |
3292 | tmp_mas.node = mast->r->node; |
3293 | *mast->r = tmp_mas; |
3294 | split = slot_total - split; |
3295 | } |
3296 | split = mab_no_null_split(b_node: mast->bn, split, slot_count: mt_slots[mast->bn->type]); |
3297 | /* Update parent slot for split calculation. */ |
3298 | if (left) |
3299 | mast->orig_l->offset += end + 1; |
3300 | |
3301 | mast_split_data(mast, mas, split); |
3302 | mast_fill_bnode(mast, mas, skip: 2); |
3303 | mas_split_final_node(mast, mas, height: height + 1); |
3304 | return true; |
3305 | } |
3306 | |
3307 | /* |
3308 | * mas_split() - Split data that is too big for one node into two. |
3309 | * @mas: The maple state |
3310 | * @b_node: The maple big node |
3311 | * Return: 1 on success, 0 on failure. |
3312 | */ |
3313 | static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) |
3314 | { |
3315 | struct maple_subtree_state mast; |
3316 | int height = 0; |
3317 | unsigned char mid_split, split = 0; |
3318 | struct maple_enode *old; |
3319 | |
3320 | /* |
3321 | * Splitting is handled differently from any other B-tree; the Maple |
3322 | * Tree splits upwards. Splitting up means that the split operation |
3323 | * occurs when the walk of the tree hits the leaves and not on the way |
3324 | * down. The reason for splitting up is that it is impossible to know |
3325 | * how much space will be needed until the leaf is (or leaves are) |
3326 | * reached. Since overwriting data is allowed and a range could |
3327 | * overwrite more than one range or result in changing one entry into 3 |
3328 | * entries, it is impossible to know if a split is required until the |
3329 | * data is examined. |
3330 | * |
3331 | * Splitting is a balancing act between keeping allocations to a minimum |
3332 | * and avoiding a 'jitter' event where a tree is expanded to make room |
3333 | * for an entry followed by a contraction when the entry is removed. To |
3334 | * accomplish the balance, there are empty slots remaining in both left |
3335 | * and right nodes after a split. |
3336 | */ |
3337 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); |
3338 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); |
3339 | MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); |
3340 | MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); |
3341 | |
3342 | trace_ma_op(fn: __func__, mas); |
3343 | mas->depth = mas_mt_height(mas); |
3344 | /* Allocation failures will happen early. */ |
3345 | mas_node_count(mas, count: 1 + mas->depth * 2); |
3346 | if (mas_is_err(mas)) |
3347 | return 0; |
3348 | |
3349 | mast.l = &l_mas; |
3350 | mast.r = &r_mas; |
3351 | mast.orig_l = &prev_l_mas; |
3352 | mast.orig_r = &prev_r_mas; |
3353 | mast.bn = b_node; |
3354 | |
3355 | while (height++ <= mas->depth) { |
3356 | if (mt_slots[b_node->type] > b_node->b_end) { |
3357 | mas_split_final_node(mast: &mast, mas, height); |
3358 | break; |
3359 | } |
3360 | |
3361 | l_mas = r_mas = *mas; |
3362 | l_mas.node = mas_new_ma_node(mas, b_node); |
3363 | r_mas.node = mas_new_ma_node(mas, b_node); |
3364 | /* |
3365 | * Another way that 'jitter' is avoided is to terminate a split up early if the |
3366 | * left or right node has space to spare. This is referred to as "pushing left" |
3367 | * or "pushing right" and is similar to the B* tree, except the nodes left or |
3368 | * right can rarely be reused due to RCU, but the ripple upwards is halted which |
3369 | * is a significant savings. |
3370 | */ |
3371 | /* Try to push left. */ |
3372 | if (mas_push_data(mas, height, mast: &mast, left: true)) |
3373 | break; |
3374 | /* Try to push right. */ |
3375 | if (mas_push_data(mas, height, mast: &mast, left: false)) |
3376 | break; |
3377 | |
3378 | split = mab_calc_split(mas, bn: b_node, mid_split: &mid_split, min: prev_l_mas.min); |
3379 | mast_split_data(mast: &mast, mas, split); |
3380 | /* |
3381 | * Usually correct, mab_mas_cp in the above call overwrites |
3382 | * r->max. |
3383 | */ |
3384 | mast.r->max = mas->max; |
3385 | mast_fill_bnode(mast: &mast, mas, skip: 1); |
3386 | prev_l_mas = *mast.l; |
3387 | prev_r_mas = *mast.r; |
3388 | } |
3389 | |
3390 | /* Set the original node as dead */ |
3391 | old = mas->node; |
3392 | mas->node = l_mas.node; |
3393 | mas_wmb_replace(mas, old_enode: old); |
3394 | mtree_range_walk(mas); |
3395 | return 1; |
3396 | } |
3397 | |
3398 | /* |
3399 | * mas_reuse_node() - Reuse the node to store the data. |
3400 | * @wr_mas: The maple write state |
3401 | * @bn: The maple big node |
3402 | * @end: The end of the data. |
3403 | * |
3404 | * Will always return false in RCU mode. |
3405 | * |
3406 | * Return: True if node was reused, false otherwise. |
3407 | */ |
3408 | static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, |
3409 | struct maple_big_node *bn, unsigned char end) |
3410 | { |
3411 | /* Need to be rcu safe. */ |
3412 | if (mt_in_rcu(mt: wr_mas->mas->tree)) |
3413 | return false; |
3414 | |
3415 | if (end > bn->b_end) { |
3416 | int clear = mt_slots[wr_mas->type] - bn->b_end; |
3417 | |
3418 | memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); |
3419 | memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); |
3420 | } |
3421 | mab_mas_cp(b_node: bn, mab_start: 0, mab_end: bn->b_end, mas: wr_mas->mas, new_max: false); |
3422 | return true; |
3423 | } |
3424 | |
3425 | /* |
3426 | * mas_commit_b_node() - Commit the big node into the tree. |
3427 | * @wr_mas: The maple write state |
3428 | * @b_node: The maple big node |
3429 | * @end: The end of the data. |
3430 | */ |
3431 | static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, |
3432 | struct maple_big_node *b_node, unsigned char end) |
3433 | { |
3434 | struct maple_node *node; |
3435 | struct maple_enode *old_enode; |
3436 | unsigned char b_end = b_node->b_end; |
3437 | enum maple_type b_type = b_node->type; |
3438 | |
3439 | old_enode = wr_mas->mas->node; |
3440 | if ((b_end < mt_min_slots[b_type]) && |
3441 | (!mte_is_root(node: old_enode)) && |
3442 | (mas_mt_height(mas: wr_mas->mas) > 1)) |
3443 | return mas_rebalance(mas: wr_mas->mas, b_node); |
3444 | |
3445 | if (b_end >= mt_slots[b_type]) |
3446 | return mas_split(mas: wr_mas->mas, b_node); |
3447 | |
3448 | if (mas_reuse_node(wr_mas, bn: b_node, end)) |
3449 | goto reuse_node; |
3450 | |
3451 | mas_node_count(mas: wr_mas->mas, count: 1); |
3452 | if (mas_is_err(mas: wr_mas->mas)) |
3453 | return 0; |
3454 | |
3455 | node = mas_pop_node(mas: wr_mas->mas); |
3456 | node->parent = mas_mn(mas: wr_mas->mas)->parent; |
3457 | wr_mas->mas->node = mt_mk_node(node, type: b_type); |
3458 | mab_mas_cp(b_node, mab_start: 0, mab_end: b_end, mas: wr_mas->mas, new_max: false); |
3459 | mas_replace_node(mas: wr_mas->mas, old_enode); |
3460 | reuse_node: |
3461 | mas_update_gap(mas: wr_mas->mas); |
3462 | wr_mas->mas->end = b_end; |
3463 | return 1; |
3464 | } |
3465 | |
3466 | /* |
3467 | * mas_root_expand() - Expand a root to a node |
3468 | * @mas: The maple state |
3469 | * @entry: The entry to store into the tree |
3470 | */ |
3471 | static inline int mas_root_expand(struct ma_state *mas, void *entry) |
3472 | { |
3473 | void *contents = mas_root_locked(mas); |
3474 | enum maple_type type = maple_leaf_64; |
3475 | struct maple_node *node; |
3476 | void __rcu **slots; |
3477 | unsigned long *pivots; |
3478 | int slot = 0; |
3479 | |
3480 | mas_node_count(mas, count: 1); |
3481 | if (unlikely(mas_is_err(mas))) |
3482 | return 0; |
3483 | |
3484 | node = mas_pop_node(mas); |
3485 | pivots = ma_pivots(node, type); |
3486 | slots = ma_slots(mn: node, mt: type); |
3487 | node->parent = ma_parent_ptr(mas_tree_parent(mas)); |
3488 | mas->node = mt_mk_node(node, type); |
3489 | mas->status = ma_active; |
3490 | |
3491 | if (mas->index) { |
3492 | if (contents) { |
3493 | rcu_assign_pointer(slots[slot], contents); |
3494 | if (likely(mas->index > 1)) |
3495 | slot++; |
3496 | } |
3497 | pivots[slot++] = mas->index - 1; |
3498 | } |
3499 | |
3500 | rcu_assign_pointer(slots[slot], entry); |
3501 | mas->offset = slot; |
3502 | pivots[slot] = mas->last; |
3503 | if (mas->last != ULONG_MAX) |
3504 | pivots[++slot] = ULONG_MAX; |
3505 | |
3506 | mas->depth = 1; |
3507 | mas_set_height(mas); |
3508 | ma_set_meta(mn: node, mt: maple_leaf_64, offset: 0, end: slot); |
3509 | /* swap the new root into the tree */ |
3510 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); |
3511 | return slot; |
3512 | } |
3513 | |
3514 | static inline void mas_store_root(struct ma_state *mas, void *entry) |
3515 | { |
3516 | if (likely((mas->last != 0) || (mas->index != 0))) |
3517 | mas_root_expand(mas, entry); |
3518 | else if (((unsigned long) (entry) & 3) == 2) |
3519 | mas_root_expand(mas, entry); |
3520 | else { |
3521 | rcu_assign_pointer(mas->tree->ma_root, entry); |
3522 | mas->status = ma_start; |
3523 | } |
3524 | } |
3525 | |
3526 | /* |
3527 | * mas_is_span_wr() - Check if the write needs to be treated as a write that |
3528 | * spans the node. |
3529 | * @mas: The maple state |
3530 | * @piv: The pivot value being written |
3531 | * @type: The maple node type |
3532 | * @entry: The data to write |
3533 | * |
3534 | * Spanning writes are writes that start in one node and end in another OR if |
3535 | * the write of a %NULL will cause the node to end with a %NULL. |
3536 | * |
3537 | * Return: True if this is a spanning write, false otherwise. |
3538 | */ |
3539 | static bool mas_is_span_wr(struct ma_wr_state *wr_mas) |
3540 | { |
3541 | unsigned long max = wr_mas->r_max; |
3542 | unsigned long last = wr_mas->mas->last; |
3543 | enum maple_type type = wr_mas->type; |
3544 | void *entry = wr_mas->entry; |
3545 | |
3546 | /* Contained in this pivot, fast path */ |
3547 | if (last < max) |
3548 | return false; |
3549 | |
3550 | if (ma_is_leaf(type)) { |
3551 | max = wr_mas->mas->max; |
3552 | if (last < max) |
3553 | return false; |
3554 | } |
3555 | |
3556 | if (last == max) { |
3557 | /* |
3558 | * The last entry of leaf node cannot be NULL unless it is the |
3559 | * rightmost node (writing ULONG_MAX), otherwise it spans slots. |
3560 | */ |
3561 | if (entry || last == ULONG_MAX) |
3562 | return false; |
3563 | } |
3564 | |
3565 | trace_ma_write(fn: __func__, mas: wr_mas->mas, piv: wr_mas->r_max, val: entry); |
3566 | return true; |
3567 | } |
3568 | |
3569 | static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas) |
3570 | { |
3571 | wr_mas->type = mte_node_type(entry: wr_mas->mas->node); |
3572 | mas_wr_node_walk(wr_mas); |
3573 | wr_mas->slots = ma_slots(mn: wr_mas->node, mt: wr_mas->type); |
3574 | } |
3575 | |
3576 | static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas) |
3577 | { |
3578 | wr_mas->mas->max = wr_mas->r_max; |
3579 | wr_mas->mas->min = wr_mas->r_min; |
3580 | wr_mas->mas->node = wr_mas->content; |
3581 | wr_mas->mas->offset = 0; |
3582 | wr_mas->mas->depth++; |
3583 | } |
3584 | /* |
3585 | * mas_wr_walk() - Walk the tree for a write. |
3586 | * @wr_mas: The maple write state |
3587 | * |
3588 | * Uses mas_slot_locked() and does not need to worry about dead nodes. |
3589 | * |
3590 | * Return: True if it's contained in a node, false on spanning write. |
3591 | */ |
3592 | static bool mas_wr_walk(struct ma_wr_state *wr_mas) |
3593 | { |
3594 | struct ma_state *mas = wr_mas->mas; |
3595 | |
3596 | while (true) { |
3597 | mas_wr_walk_descend(wr_mas); |
3598 | if (unlikely(mas_is_span_wr(wr_mas))) |
3599 | return false; |
3600 | |
3601 | wr_mas->content = mas_slot_locked(mas, slots: wr_mas->slots, |
3602 | offset: mas->offset); |
3603 | if (ma_is_leaf(type: wr_mas->type)) |
3604 | return true; |
3605 | |
3606 | mas_wr_walk_traverse(wr_mas); |
3607 | } |
3608 | |
3609 | return true; |
3610 | } |
3611 | |
3612 | static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) |
3613 | { |
3614 | struct ma_state *mas = wr_mas->mas; |
3615 | |
3616 | while (true) { |
3617 | mas_wr_walk_descend(wr_mas); |
3618 | wr_mas->content = mas_slot_locked(mas, slots: wr_mas->slots, |
3619 | offset: mas->offset); |
3620 | if (ma_is_leaf(type: wr_mas->type)) |
3621 | return true; |
3622 | mas_wr_walk_traverse(wr_mas); |
3623 | |
3624 | } |
3625 | return true; |
3626 | } |
3627 | /* |
3628 | * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs. |
3629 | * @l_wr_mas: The left maple write state |
3630 | * @r_wr_mas: The right maple write state |
3631 | */ |
3632 | static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, |
3633 | struct ma_wr_state *r_wr_mas) |
3634 | { |
3635 | struct ma_state *r_mas = r_wr_mas->mas; |
3636 | struct ma_state *l_mas = l_wr_mas->mas; |
3637 | unsigned char l_slot; |
3638 | |
3639 | l_slot = l_mas->offset; |
3640 | if (!l_wr_mas->content) |
3641 | l_mas->index = l_wr_mas->r_min; |
3642 | |
3643 | if ((l_mas->index == l_wr_mas->r_min) && |
3644 | (l_slot && |
3645 | !mas_slot_locked(mas: l_mas, slots: l_wr_mas->slots, offset: l_slot - 1))) { |
3646 | if (l_slot > 1) |
3647 | l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; |
3648 | else |
3649 | l_mas->index = l_mas->min; |
3650 | |
3651 | l_mas->offset = l_slot - 1; |
3652 | } |
3653 | |
3654 | if (!r_wr_mas->content) { |
3655 | if (r_mas->last < r_wr_mas->r_max) |
3656 | r_mas->last = r_wr_mas->r_max; |
3657 | r_mas->offset++; |
3658 | } else if ((r_mas->last == r_wr_mas->r_max) && |
3659 | (r_mas->last < r_mas->max) && |
3660 | !mas_slot_locked(mas: r_mas, slots: r_wr_mas->slots, offset: r_mas->offset + 1)) { |
3661 | r_mas->last = mas_safe_pivot(mas: r_mas, pivots: r_wr_mas->pivots, |
3662 | piv: r_wr_mas->type, type: r_mas->offset + 1); |
3663 | r_mas->offset++; |
3664 | } |
3665 | } |
3666 | |
3667 | static inline void *mas_state_walk(struct ma_state *mas) |
3668 | { |
3669 | void *entry; |
3670 | |
3671 | entry = mas_start(mas); |
3672 | if (mas_is_none(mas)) |
3673 | return NULL; |
3674 | |
3675 | if (mas_is_ptr(mas)) |
3676 | return entry; |
3677 | |
3678 | return mtree_range_walk(mas); |
3679 | } |
3680 | |
3681 | /* |
3682 | * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up |
3683 | * to date. |
3684 | * |
3685 | * @mas: The maple state. |
3686 | * |
3687 | * Note: Leaves mas in undesirable state. |
3688 | * Return: The entry for @mas->index or %NULL on dead node. |
3689 | */ |
3690 | static inline void *mtree_lookup_walk(struct ma_state *mas) |
3691 | { |
3692 | unsigned long *pivots; |
3693 | unsigned char offset; |
3694 | struct maple_node *node; |
3695 | struct maple_enode *next; |
3696 | enum maple_type type; |
3697 | void __rcu **slots; |
3698 | unsigned char end; |
3699 | |
3700 | next = mas->node; |
3701 | do { |
3702 | node = mte_to_node(entry: next); |
3703 | type = mte_node_type(entry: next); |
3704 | pivots = ma_pivots(node, type); |
3705 | end = mt_pivots[type]; |
3706 | offset = 0; |
3707 | do { |
3708 | if (pivots[offset] >= mas->index) |
3709 | break; |
3710 | } while (++offset < end); |
3711 | |
3712 | slots = ma_slots(mn: node, mt: type); |
3713 | next = mt_slot(mt: mas->tree, slots, offset); |
3714 | if (unlikely(ma_dead_node(node))) |
3715 | goto dead_node; |
3716 | } while (!ma_is_leaf(type)); |
3717 | |
3718 | return (void *)next; |
3719 | |
3720 | dead_node: |
3721 | mas_reset(mas); |
3722 | return NULL; |
3723 | } |
3724 | |
3725 | static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); |
3726 | /* |
3727 | * mas_new_root() - Create a new root node that only contains the entry passed |
3728 | * in. |
3729 | * @mas: The maple state |
3730 | * @entry: The entry to store. |
3731 | * |
3732 | * Only valid when the index == 0 and the last == ULONG_MAX |
3733 | * |
3734 | * Return 0 on error, 1 on success. |
3735 | */ |
3736 | static inline int mas_new_root(struct ma_state *mas, void *entry) |
3737 | { |
3738 | struct maple_enode *root = mas_root_locked(mas); |
3739 | enum maple_type type = maple_leaf_64; |
3740 | struct maple_node *node; |
3741 | void __rcu **slots; |
3742 | unsigned long *pivots; |
3743 | |
3744 | if (!entry && !mas->index && mas->last == ULONG_MAX) { |
3745 | mas->depth = 0; |
3746 | mas_set_height(mas); |
3747 | rcu_assign_pointer(mas->tree->ma_root, entry); |
3748 | mas->status = ma_start; |
3749 | goto done; |
3750 | } |
3751 | |
3752 | mas_node_count(mas, count: 1); |
3753 | if (mas_is_err(mas)) |
3754 | return 0; |
3755 | |
3756 | node = mas_pop_node(mas); |
3757 | pivots = ma_pivots(node, type); |
3758 | slots = ma_slots(mn: node, mt: type); |
3759 | node->parent = ma_parent_ptr(mas_tree_parent(mas)); |
3760 | mas->node = mt_mk_node(node, type); |
3761 | mas->status = ma_active; |
3762 | rcu_assign_pointer(slots[0], entry); |
3763 | pivots[0] = mas->last; |
3764 | mas->depth = 1; |
3765 | mas_set_height(mas); |
3766 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); |
3767 | |
3768 | done: |
3769 | if (xa_is_node(entry: root)) |
3770 | mte_destroy_walk(root, mas->tree); |
3771 | |
3772 | return 1; |
3773 | } |
3774 | /* |
3775 | * mas_wr_spanning_store() - Create a subtree with the store operation completed |
3776 | * and new nodes where necessary, then place the sub-tree in the actual tree. |
3777 | * Note that mas is expected to point to the node which caused the store to |
3778 | * span. |
3779 | * @wr_mas: The maple write state |
3780 | * |
3781 | * Return: 0 on error, positive on success. |
3782 | */ |
3783 | static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) |
3784 | { |
3785 | struct maple_subtree_state mast; |
3786 | struct maple_big_node b_node; |
3787 | struct ma_state *mas; |
3788 | unsigned char height; |
3789 | |
3790 | /* Left and Right side of spanning store */ |
3791 | MA_STATE(l_mas, NULL, 0, 0); |
3792 | MA_STATE(r_mas, NULL, 0, 0); |
3793 | MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); |
3794 | MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); |
3795 | |
3796 | /* |
3797 | * A store operation that spans multiple nodes is called a spanning |
3798 | * store and is handled early in the store call stack by the function |
3799 | * mas_is_span_wr(). When a spanning store is identified, the maple |
3800 | * state is duplicated. The first maple state walks the left tree path |
3801 | * to ``index``, the duplicate walks the right tree path to ``last``. |
3802 | * The data in the two nodes are combined into a single node, two nodes, |
3803 | * or possibly three nodes (see the 3-way split above). A ``NULL`` |
3804 | * written to the last entry of a node is considered a spanning store as |
3805 | * a rebalance is required for the operation to complete and an overflow |
3806 | * of data may happen. |
3807 | */ |
3808 | mas = wr_mas->mas; |
3809 | trace_ma_op(fn: __func__, mas); |
3810 | |
3811 | if (unlikely(!mas->index && mas->last == ULONG_MAX)) |
3812 | return mas_new_root(mas, entry: wr_mas->entry); |
3813 | /* |
3814 | * Node rebalancing may occur due to this store, so there may be three new |
3815 | * entries per level plus a new root. |
3816 | */ |
3817 | height = mas_mt_height(mas); |
3818 | mas_node_count(mas, count: 1 + height * 3); |
3819 | if (mas_is_err(mas)) |
3820 | return 0; |
3821 | |
3822 | /* |
3823 | * Set up right side. Need to get to the next offset after the spanning |
3824 | * store to ensure it's not NULL and to combine both the next node and |
3825 | * the node with the start together. |
3826 | */ |
3827 | r_mas = *mas; |
3828 | /* Avoid overflow, walk to next slot in the tree. */ |
3829 | if (r_mas.last + 1) |
3830 | r_mas.last++; |
3831 | |
3832 | r_mas.index = r_mas.last; |
3833 | mas_wr_walk_index(wr_mas: &r_wr_mas); |
3834 | r_mas.last = r_mas.index = mas->last; |
3835 | |
3836 | /* Set up left side. */ |
3837 | l_mas = *mas; |
3838 | mas_wr_walk_index(wr_mas: &l_wr_mas); |
3839 | |
3840 | if (!wr_mas->entry) { |
3841 | mas_extend_spanning_null(l_wr_mas: &l_wr_mas, r_wr_mas: &r_wr_mas); |
3842 | mas->offset = l_mas.offset; |
3843 | mas->index = l_mas.index; |
3844 | mas->last = l_mas.last = r_mas.last; |
3845 | } |
3846 | |
3847 | /* expanding NULLs may make this cover the entire range */ |
3848 | if (!l_mas.index && r_mas.last == ULONG_MAX) { |
3849 | mas_set_range(mas, start: 0, ULONG_MAX); |
3850 | return mas_new_root(mas, entry: wr_mas->entry); |
3851 | } |
3852 | |
3853 | memset(&b_node, 0, sizeof(struct maple_big_node)); |
3854 | /* Copy l_mas and store the value in b_node. */ |
3855 | mas_store_b_node(wr_mas: &l_wr_mas, b_node: &b_node, offset_end: l_mas.end); |
3856 | /* Copy r_mas into b_node. */ |
3857 | if (r_mas.offset <= r_mas.end) |
3858 | mas_mab_cp(mas: &r_mas, mas_start: r_mas.offset, mas_end: r_mas.end, |
3859 | b_node: &b_node, mab_start: b_node.b_end + 1); |
3860 | else |
3861 | b_node.b_end++; |
3862 | |
3863 | /* Stop spanning searches by searching for just index. */ |
3864 | l_mas.index = l_mas.last = mas->index; |
3865 | |
3866 | mast.bn = &b_node; |
3867 | mast.orig_l = &l_mas; |
3868 | mast.orig_r = &r_mas; |
3869 | /* Combine l_mas and r_mas and split them up evenly again. */ |
3870 | return mas_spanning_rebalance(mas, mast: &mast, count: height + 1); |
3871 | } |
3872 | |
3873 | /* |
3874 | * mas_wr_node_store() - Attempt to store the value in a node |
3875 | * @wr_mas: The maple write state |
3876 | * |
3877 | * Attempts to reuse the node, but may allocate. |
3878 | * |
3879 | * Return: True if stored, false otherwise |
3880 | */ |
3881 | static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, |
3882 | unsigned char new_end) |
3883 | { |
3884 | struct ma_state *mas = wr_mas->mas; |
3885 | void __rcu **dst_slots; |
3886 | unsigned long *dst_pivots; |
3887 | unsigned char dst_offset, offset_end = wr_mas->offset_end; |
3888 | struct maple_node reuse, *newnode; |
3889 | unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; |
3890 | bool in_rcu = mt_in_rcu(mt: mas->tree); |
3891 | |
3892 | /* Check if there is enough data. The room is enough. */ |
3893 | if (!mte_is_root(node: mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && |
3894 | !(mas->mas_flags & MA_STATE_BULK)) |
3895 | return false; |
3896 | |
3897 | if (mas->last == wr_mas->end_piv) |
3898 | offset_end++; /* don't copy this offset */ |
3899 | else if (unlikely(wr_mas->r_max == ULONG_MAX)) |
3900 | mas_bulk_rebalance(mas, end: mas->end, mt: wr_mas->type); |
3901 | |
3902 | /* set up node. */ |
3903 | if (in_rcu) { |
3904 | mas_node_count(mas, count: 1); |
3905 | if (mas_is_err(mas)) |
3906 | return false; |
3907 | |
3908 | newnode = mas_pop_node(mas); |
3909 | } else { |
3910 | memset(&reuse, 0, sizeof(struct maple_node)); |
3911 | newnode = &reuse; |
3912 | } |
3913 | |
3914 | newnode->parent = mas_mn(mas)->parent; |
3915 | dst_pivots = ma_pivots(node: newnode, type: wr_mas->type); |
3916 | dst_slots = ma_slots(mn: newnode, mt: wr_mas->type); |
3917 | /* Copy from start to insert point */ |
3918 | memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset); |
3919 | memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset); |
3920 | |
3921 | /* Handle insert of new range starting after old range */ |
3922 | if (wr_mas->r_min < mas->index) { |
3923 | rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content); |
3924 | dst_pivots[mas->offset++] = mas->index - 1; |
3925 | } |
3926 | |
3927 | /* Store the new entry and range end. */ |
3928 | if (mas->offset < node_pivots) |
3929 | dst_pivots[mas->offset] = mas->last; |
3930 | rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry); |
3931 | |
3932 | /* |
3933 | * this range wrote to the end of the node or it overwrote the rest of |
3934 | * the data |
3935 | */ |
3936 | if (offset_end > mas->end) |
3937 | goto done; |
3938 | |
3939 | dst_offset = mas->offset + 1; |
3940 | /* Copy to the end of node if necessary. */ |
3941 | copy_size = mas->end - offset_end + 1; |
3942 | memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, |
3943 | sizeof(void *) * copy_size); |
3944 | memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, |
3945 | sizeof(unsigned long) * (copy_size - 1)); |
3946 | |
3947 | if (new_end < node_pivots) |
3948 | dst_pivots[new_end] = mas->max; |
3949 | |
3950 | done: |
3951 | mas_leaf_set_meta(node: newnode, mt: maple_leaf_64, end: new_end); |
3952 | if (in_rcu) { |
3953 | struct maple_enode *old_enode = mas->node; |
3954 | |
3955 | mas->node = mt_mk_node(node: newnode, type: wr_mas->type); |
3956 | mas_replace_node(mas, old_enode); |
3957 | } else { |
3958 | memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); |
3959 | } |
3960 | trace_ma_write(fn: __func__, mas, piv: 0, val: wr_mas->entry); |
3961 | mas_update_gap(mas); |
3962 | mas->end = new_end; |
3963 | return true; |
3964 | } |
3965 | |
3966 | /* |
3967 | * mas_wr_slot_store: Attempt to store a value in a slot. |
3968 | * @wr_mas: the maple write state |
3969 | * |
3970 | * Return: True if stored, false otherwise |
3971 | */ |
3972 | static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) |
3973 | { |
3974 | struct ma_state *mas = wr_mas->mas; |
3975 | unsigned char offset = mas->offset; |
3976 | void __rcu **slots = wr_mas->slots; |
3977 | bool gap = false; |
3978 | |
3979 | gap |= !mt_slot_locked(mt: mas->tree, slots, offset); |
3980 | gap |= !mt_slot_locked(mt: mas->tree, slots, offset: offset + 1); |
3981 | |
3982 | if (wr_mas->offset_end - offset == 1) { |
3983 | if (mas->index == wr_mas->r_min) { |
3984 | /* Overwriting the range and a part of the next one */ |
3985 | rcu_assign_pointer(slots[offset], wr_mas->entry); |
3986 | wr_mas->pivots[offset] = mas->last; |
3987 | } else { |
3988 | /* Overwriting a part of the range and the next one */ |
3989 | rcu_assign_pointer(slots[offset + 1], wr_mas->entry); |
3990 | wr_mas->pivots[offset] = mas->index - 1; |
3991 | mas->offset++; /* Keep mas accurate. */ |
3992 | } |
3993 | } else if (!mt_in_rcu(mt: mas->tree)) { |
3994 | /* |
3995 | * Expand the range, only partially overwriting the previous and |
3996 | * next ranges |
3997 | */ |
3998 | gap |= !mt_slot_locked(mt: mas->tree, slots, offset: offset + 2); |
3999 | rcu_assign_pointer(slots[offset + 1], wr_mas->entry); |
4000 | wr_mas->pivots[offset] = mas->index - 1; |
4001 | wr_mas->pivots[offset + 1] = mas->last; |
4002 | mas->offset++; /* Keep mas accurate. */ |
4003 | } else { |
4004 | return false; |
4005 | } |
4006 | |
4007 | trace_ma_write(fn: __func__, mas, piv: 0, val: wr_mas->entry); |
4008 | /* |
4009 | * Only update gap when the new entry is empty or there is an empty |
4010 | * entry in the original two ranges. |
4011 | */ |
4012 | if (!wr_mas->entry || gap) |
4013 | mas_update_gap(mas); |
4014 | |
4015 | return true; |
4016 | } |
4017 | |
4018 | static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) |
4019 | { |
4020 | struct ma_state *mas = wr_mas->mas; |
4021 | |
4022 | if (!wr_mas->slots[wr_mas->offset_end]) { |
4023 | /* If this one is null, the next and prev are not */ |
4024 | mas->last = wr_mas->end_piv; |
4025 | } else { |
4026 | /* Check next slot(s) if we are overwriting the end */ |
4027 | if ((mas->last == wr_mas->end_piv) && |
4028 | (mas->end != wr_mas->offset_end) && |
4029 | !wr_mas->slots[wr_mas->offset_end + 1]) { |
4030 | wr_mas->offset_end++; |
4031 | if (wr_mas->offset_end == mas->end) |
4032 | mas->last = mas->max; |
4033 | else |
4034 | mas->last = wr_mas->pivots[wr_mas->offset_end]; |
4035 | wr_mas->end_piv = mas->last; |
4036 | } |
4037 | } |
4038 | |
4039 | if (!wr_mas->content) { |
4040 | /* If this one is null, the next and prev are not */ |
4041 | mas->index = wr_mas->r_min; |
4042 | } else { |
4043 | /* Check prev slot if we are overwriting the start */ |
4044 | if (mas->index == wr_mas->r_min && mas->offset && |
4045 | !wr_mas->slots[mas->offset - 1]) { |
4046 | mas->offset--; |
4047 | wr_mas->r_min = mas->index = |
4048 | mas_safe_min(mas, pivots: wr_mas->pivots, offset: mas->offset); |
4049 | wr_mas->r_max = wr_mas->pivots[mas->offset]; |
4050 | } |
4051 | } |
4052 | } |
4053 | |
4054 | static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) |
4055 | { |
4056 | while ((wr_mas->offset_end < wr_mas->mas->end) && |
4057 | (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) |
4058 | wr_mas->offset_end++; |
4059 | |
4060 | if (wr_mas->offset_end < wr_mas->mas->end) |
4061 | wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; |
4062 | else |
4063 | wr_mas->end_piv = wr_mas->mas->max; |
4064 | |
4065 | if (!wr_mas->entry) |
4066 | mas_wr_extend_null(wr_mas); |
4067 | } |
4068 | |
4069 | static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) |
4070 | { |
4071 | struct ma_state *mas = wr_mas->mas; |
4072 | unsigned char new_end = mas->end + 2; |
4073 | |
4074 | new_end -= wr_mas->offset_end - mas->offset; |
4075 | if (wr_mas->r_min == mas->index) |
4076 | new_end--; |
4077 | |
4078 | if (wr_mas->end_piv == mas->last) |
4079 | new_end--; |
4080 | |
4081 | return new_end; |
4082 | } |
4083 | |
4084 | /* |
4085 | * mas_wr_append: Attempt to append |
4086 | * @wr_mas: the maple write state |
4087 | * @new_end: The end of the node after the modification |
4088 | * |
4089 | * This is currently unsafe in rcu mode since the end of the node may be cached |
4090 | * by readers while the node contents may be updated which could result in |
4091 | * inaccurate information. |
4092 | * |
4093 | * Return: True if appended, false otherwise |
4094 | */ |
4095 | static inline bool mas_wr_append(struct ma_wr_state *wr_mas, |
4096 | unsigned char new_end) |
4097 | { |
4098 | struct ma_state *mas; |
4099 | void __rcu **slots; |
4100 | unsigned char end; |
4101 | |
4102 | mas = wr_mas->mas; |
4103 | if (mt_in_rcu(mt: mas->tree)) |
4104 | return false; |
4105 | |
4106 | end = mas->end; |
4107 | if (mas->offset != end) |
4108 | return false; |
4109 | |
4110 | if (new_end < mt_pivots[wr_mas->type]) { |
4111 | wr_mas->pivots[new_end] = wr_mas->pivots[end]; |
4112 | ma_set_meta(mn: wr_mas->node, mt: wr_mas->type, offset: 0, end: new_end); |
4113 | } |
4114 | |
4115 | slots = wr_mas->slots; |
4116 | if (new_end == end + 1) { |
4117 | if (mas->last == wr_mas->r_max) { |
4118 | /* Append to end of range */ |
4119 | rcu_assign_pointer(slots[new_end], wr_mas->entry); |
4120 | wr_mas->pivots[end] = mas->index - 1; |
4121 | mas->offset = new_end; |
4122 | } else { |
4123 | /* Append to start of range */ |
4124 | rcu_assign_pointer(slots[new_end], wr_mas->content); |
4125 | wr_mas->pivots[end] = mas->last; |
4126 | rcu_assign_pointer(slots[end], wr_mas->entry); |
4127 | } |
4128 | } else { |
4129 | /* Append to the range without touching any boundaries. */ |
4130 | rcu_assign_pointer(slots[new_end], wr_mas->content); |
4131 | wr_mas->pivots[end + 1] = mas->last; |
4132 | rcu_assign_pointer(slots[end + 1], wr_mas->entry); |
4133 | wr_mas->pivots[end] = mas->index - 1; |
4134 | mas->offset = end + 1; |
4135 | } |
4136 | |
4137 | if (!wr_mas->content || !wr_mas->entry) |
4138 | mas_update_gap(mas); |
4139 | |
4140 | mas->end = new_end; |
4141 | trace_ma_write(fn: __func__, mas, piv: new_end, val: wr_mas->entry); |
4142 | return true; |
4143 | } |
4144 | |
4145 | /* |
4146 | * mas_wr_bnode() - Slow path for a modification. |
4147 | * @wr_mas: The write maple state |
4148 | * |
4149 | * This is where split, rebalance end up. |
4150 | */ |
4151 | static void mas_wr_bnode(struct ma_wr_state *wr_mas) |
4152 | { |
4153 | struct maple_big_node b_node; |
4154 | |
4155 | trace_ma_write(fn: __func__, mas: wr_mas->mas, piv: 0, val: wr_mas->entry); |
4156 | memset(&b_node, 0, sizeof(struct maple_big_node)); |
4157 | mas_store_b_node(wr_mas, b_node: &b_node, offset_end: wr_mas->offset_end); |
4158 | mas_commit_b_node(wr_mas, b_node: &b_node, end: wr_mas->mas->end); |
4159 | } |
4160 | |
4161 | static inline void mas_wr_modify(struct ma_wr_state *wr_mas) |
4162 | { |
4163 | struct ma_state *mas = wr_mas->mas; |
4164 | unsigned char new_end; |
4165 | |
4166 | /* Direct replacement */ |
4167 | if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { |
4168 | rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); |
4169 | if (!!wr_mas->entry ^ !!wr_mas->content) |
4170 | mas_update_gap(mas); |
4171 | return; |
4172 | } |
4173 | |
4174 | /* |
4175 | * new_end exceeds the size of the maple node and cannot enter the fast |
4176 | * path. |
4177 | */ |
4178 | new_end = mas_wr_new_end(wr_mas); |
4179 | if (new_end >= mt_slots[wr_mas->type]) |
4180 | goto slow_path; |
4181 | |
4182 | /* Attempt to append */ |
4183 | if (mas_wr_append(wr_mas, new_end)) |
4184 | return; |
4185 | |
4186 | if (new_end == mas->end && mas_wr_slot_store(wr_mas)) |
4187 | return; |
4188 | |
4189 | if (mas_wr_node_store(wr_mas, new_end)) |
4190 | return; |
4191 | |
4192 | if (mas_is_err(mas)) |
4193 | return; |
4194 | |
4195 | slow_path: |
4196 | mas_wr_bnode(wr_mas); |
4197 | } |
4198 | |
4199 | /* |
4200 | * mas_wr_store_entry() - Internal call to store a value |
4201 | * @mas: The maple state |
4202 | * @entry: The entry to store. |
4203 | * |
4204 | * Return: The contents that was stored at the index. |
4205 | */ |
4206 | static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) |
4207 | { |
4208 | struct ma_state *mas = wr_mas->mas; |
4209 | |
4210 | wr_mas->content = mas_start(mas); |
4211 | if (mas_is_none(mas) || mas_is_ptr(mas)) { |
4212 | mas_store_root(mas, entry: wr_mas->entry); |
4213 | return wr_mas->content; |
4214 | } |
4215 | |
4216 | if (unlikely(!mas_wr_walk(wr_mas))) { |
4217 | mas_wr_spanning_store(wr_mas); |
4218 | return wr_mas->content; |
4219 | } |
4220 | |
4221 | /* At this point, we are at the leaf node that needs to be altered. */ |
4222 | mas_wr_end_piv(wr_mas); |
4223 | /* New root for a single pointer */ |
4224 | if (unlikely(!mas->index && mas->last == ULONG_MAX)) { |
4225 | mas_new_root(mas, entry: wr_mas->entry); |
4226 | return wr_mas->content; |
4227 | } |
4228 | |
4229 | mas_wr_modify(wr_mas); |
4230 | return wr_mas->content; |
4231 | } |
4232 | |
4233 | /** |
4234 | * mas_insert() - Internal call to insert a value |
4235 | * @mas: The maple state |
4236 | * @entry: The entry to store |
4237 | * |
4238 | * Return: %NULL or the contents that already exists at the requested index |
4239 | * otherwise. The maple state needs to be checked for error conditions. |
4240 | */ |
4241 | static inline void *mas_insert(struct ma_state *mas, void *entry) |
4242 | { |
4243 | MA_WR_STATE(wr_mas, mas, entry); |
4244 | |
4245 | /* |
4246 | * Inserting a new range inserts either 0, 1, or 2 pivots within the |
4247 | * tree. If the insert fits exactly into an existing gap with a value |
4248 | * of NULL, then the slot only needs to be written with the new value. |
4249 | * If the range being inserted is adjacent to another range, then only a |
4250 | * single pivot needs to be inserted (as well as writing the entry). If |
4251 | * the new range is within a gap but does not touch any other ranges, |
4252 | * then two pivots need to be inserted: the start - 1, and the end. As |
4253 | * usual, the entry must be written. Most operations require a new node |
4254 | * to be allocated and replace an existing node to ensure RCU safety, |
4255 | * when in RCU mode. The exception to requiring a newly allocated node |
4256 | * is when inserting at the end of a node (appending). When done |
4257 | * carefully, appending can reuse the node in place. |
4258 | */ |
4259 | wr_mas.content = mas_start(mas); |
4260 | if (wr_mas.content) |
4261 | goto exists; |
4262 | |
4263 | if (mas_is_none(mas) || mas_is_ptr(mas)) { |
4264 | mas_store_root(mas, entry); |
4265 | return NULL; |
4266 | } |
4267 | |
4268 | /* spanning writes always overwrite something */ |
4269 | if (!mas_wr_walk(wr_mas: &wr_mas)) |
4270 | goto exists; |
4271 | |
4272 | /* At this point, we are at the leaf node that needs to be altered. */ |
4273 | wr_mas.offset_end = mas->offset; |
4274 | wr_mas.end_piv = wr_mas.r_max; |
4275 | |
4276 | if (wr_mas.content || (mas->last > wr_mas.r_max)) |
4277 | goto exists; |
4278 | |
4279 | if (!entry) |
4280 | return NULL; |
4281 | |
4282 | mas_wr_modify(wr_mas: &wr_mas); |
4283 | return wr_mas.content; |
4284 | |
4285 | exists: |
4286 | mas_set_err(mas, err: -EEXIST); |
4287 | return wr_mas.content; |
4288 | |
4289 | } |
4290 | |
4291 | /** |
4292 | * mas_alloc_cyclic() - Internal call to find somewhere to store an entry |
4293 | * @mas: The maple state. |
4294 | * @startp: Pointer to ID. |
4295 | * @range_lo: Lower bound of range to search. |
4296 | * @range_hi: Upper bound of range to search. |
4297 | * @entry: The entry to store. |
4298 | * @next: Pointer to next ID to allocate. |
4299 | * @gfp: The GFP_FLAGS to use for allocations. |
4300 | * |
4301 | * Return: 0 if the allocation succeeded without wrapping, 1 if the |
4302 | * allocation succeeded after wrapping, or -EBUSY if there are no |
4303 | * free entries. |
4304 | */ |
4305 | int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, |
4306 | void *entry, unsigned long range_lo, unsigned long range_hi, |
4307 | unsigned long *next, gfp_t gfp) |
4308 | { |
4309 | unsigned long min = range_lo; |
4310 | int ret = 0; |
4311 | |
4312 | range_lo = max(min, *next); |
4313 | ret = mas_empty_area(mas, min: range_lo, max: range_hi, size: 1); |
4314 | if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { |
4315 | mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; |
4316 | ret = 1; |
4317 | } |
4318 | if (ret < 0 && range_lo > min) { |
4319 | ret = mas_empty_area(mas, min, max: range_hi, size: 1); |
4320 | if (ret == 0) |
4321 | ret = 1; |
4322 | } |
4323 | if (ret < 0) |
4324 | return ret; |
4325 | |
4326 | do { |
4327 | mas_insert(mas, entry); |
4328 | } while (mas_nomem(mas, gfp)); |
4329 | if (mas_is_err(mas)) |
4330 | return xa_err(entry: mas->node); |
4331 | |
4332 | *startp = mas->index; |
4333 | *next = *startp + 1; |
4334 | if (*next == 0) |
4335 | mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; |
4336 | |
4337 | return ret; |
4338 | } |
4339 | EXPORT_SYMBOL(mas_alloc_cyclic); |
4340 | |
4341 | static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) |
4342 | { |
4343 | retry: |
4344 | mas_set(mas, index); |
4345 | mas_state_walk(mas); |
4346 | if (mas_is_start(mas)) |
4347 | goto retry; |
4348 | } |
4349 | |
4350 | static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas, |
4351 | struct maple_node *node, const unsigned long index) |
4352 | { |
4353 | if (unlikely(ma_dead_node(node))) { |
4354 | mas_rewalk(mas, index); |
4355 | return true; |
4356 | } |
4357 | return false; |
4358 | } |
4359 | |
4360 | /* |
4361 | * mas_prev_node() - Find the prev non-null entry at the same level in the |
4362 | * tree. The prev value will be mas->node[mas->offset] or the status will be |
4363 | * ma_none. |
4364 | * @mas: The maple state |
4365 | * @min: The lower limit to search |
4366 | * |
4367 | * The prev node value will be mas->node[mas->offset] or the status will be |
4368 | * ma_none. |
4369 | * Return: 1 if the node is dead, 0 otherwise. |
4370 | */ |
4371 | static int mas_prev_node(struct ma_state *mas, unsigned long min) |
4372 | { |
4373 | enum maple_type mt; |
4374 | int offset, level; |
4375 | void __rcu **slots; |
4376 | struct maple_node *node; |
4377 | unsigned long *pivots; |
4378 | unsigned long max; |
4379 | |
4380 | node = mas_mn(mas); |
4381 | if (!mas->min) |
4382 | goto no_entry; |
4383 | |
4384 | max = mas->min - 1; |
4385 | if (max < min) |
4386 | goto no_entry; |
4387 | |
4388 | level = 0; |
4389 | do { |
4390 | if (ma_is_root(node)) |
4391 | goto no_entry; |
4392 | |
4393 | /* Walk up. */ |
4394 | if (unlikely(mas_ascend(mas))) |
4395 | return 1; |
4396 | offset = mas->offset; |
4397 | level++; |
4398 | node = mas_mn(mas); |
4399 | } while (!offset); |
4400 | |
4401 | offset--; |
4402 | mt = mte_node_type(entry: mas->node); |
4403 | while (level > 1) { |
4404 | level--; |
4405 | slots = ma_slots(mn: node, mt); |
4406 | mas->node = mas_slot(mas, slots, offset); |
4407 | if (unlikely(ma_dead_node(node))) |
4408 | return 1; |
4409 | |
4410 | mt = mte_node_type(entry: mas->node); |
4411 | node = mas_mn(mas); |
4412 | pivots = ma_pivots(node, type: mt); |
4413 | offset = ma_data_end(node, type: mt, pivots, max); |
4414 | if (unlikely(ma_dead_node(node))) |
4415 | return 1; |
4416 | } |
4417 | |
4418 | slots = ma_slots(mn: node, mt); |
4419 | mas->node = mas_slot(mas, slots, offset); |
4420 | pivots = ma_pivots(node, type: mt); |
4421 | if (unlikely(ma_dead_node(node))) |
4422 | return 1; |
4423 | |
4424 | if (likely(offset)) |
4425 | mas->min = pivots[offset - 1] + 1; |
4426 | mas->max = max; |
4427 | mas->offset = mas_data_end(mas); |
4428 | if (unlikely(mte_dead_node(mas->node))) |
4429 | return 1; |
4430 | |
4431 | mas->end = mas->offset; |
4432 | return 0; |
4433 | |
4434 | no_entry: |
4435 | if (unlikely(ma_dead_node(node))) |
4436 | return 1; |
4437 | |
4438 | mas->status = ma_underflow; |
4439 | return 0; |
4440 | } |
4441 | |
4442 | /* |
4443 | * mas_prev_slot() - Get the entry in the previous slot |
4444 | * |
4445 | * @mas: The maple state |
4446 | * @max: The minimum starting range |
4447 | * @empty: Can be empty |
4448 | * @set_underflow: Set the @mas->node to underflow state on limit. |
4449 | * |
4450 | * Return: The entry in the previous slot which is possibly NULL |
4451 | */ |
4452 | static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) |
4453 | { |
4454 | void *entry; |
4455 | void __rcu **slots; |
4456 | unsigned long pivot; |
4457 | enum maple_type type; |
4458 | unsigned long *pivots; |
4459 | struct maple_node *node; |
4460 | unsigned long save_point = mas->index; |
4461 | |
4462 | retry: |
4463 | node = mas_mn(mas); |
4464 | type = mte_node_type(entry: mas->node); |
4465 | pivots = ma_pivots(node, type); |
4466 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4467 | goto retry; |
4468 | |
4469 | if (mas->min <= min) { |
4470 | pivot = mas_safe_min(mas, pivots, offset: mas->offset); |
4471 | |
4472 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4473 | goto retry; |
4474 | |
4475 | if (pivot <= min) |
4476 | goto underflow; |
4477 | } |
4478 | |
4479 | again: |
4480 | if (likely(mas->offset)) { |
4481 | mas->offset--; |
4482 | mas->last = mas->index - 1; |
4483 | mas->index = mas_safe_min(mas, pivots, offset: mas->offset); |
4484 | } else { |
4485 | if (mas->index <= min) |
4486 | goto underflow; |
4487 | |
4488 | if (mas_prev_node(mas, min)) { |
4489 | mas_rewalk(mas, index: save_point); |
4490 | goto retry; |
4491 | } |
4492 | |
4493 | if (WARN_ON_ONCE(mas_is_underflow(mas))) |
4494 | return NULL; |
4495 | |
4496 | mas->last = mas->max; |
4497 | node = mas_mn(mas); |
4498 | type = mte_node_type(entry: mas->node); |
4499 | pivots = ma_pivots(node, type); |
4500 | mas->index = pivots[mas->offset - 1] + 1; |
4501 | } |
4502 | |
4503 | slots = ma_slots(mn: node, mt: type); |
4504 | entry = mas_slot(mas, slots, offset: mas->offset); |
4505 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4506 | goto retry; |
4507 | |
4508 | |
4509 | if (likely(entry)) |
4510 | return entry; |
4511 | |
4512 | if (!empty) { |
4513 | if (mas->index <= min) { |
4514 | mas->status = ma_underflow; |
4515 | return NULL; |
4516 | } |
4517 | |
4518 | goto again; |
4519 | } |
4520 | |
4521 | return entry; |
4522 | |
4523 | underflow: |
4524 | mas->status = ma_underflow; |
4525 | return NULL; |
4526 | } |
4527 | |
4528 | /* |
4529 | * mas_next_node() - Get the next node at the same level in the tree. |
4530 | * @mas: The maple state |
4531 | * @max: The maximum pivot value to check. |
4532 | * |
4533 | * The next value will be mas->node[mas->offset] or the status will have |
4534 | * overflowed. |
4535 | * Return: 1 on dead node, 0 otherwise. |
4536 | */ |
4537 | static int mas_next_node(struct ma_state *mas, struct maple_node *node, |
4538 | unsigned long max) |
4539 | { |
4540 | unsigned long min; |
4541 | unsigned long *pivots; |
4542 | struct maple_enode *enode; |
4543 | struct maple_node *tmp; |
4544 | int level = 0; |
4545 | unsigned char node_end; |
4546 | enum maple_type mt; |
4547 | void __rcu **slots; |
4548 | |
4549 | if (mas->max >= max) |
4550 | goto overflow; |
4551 | |
4552 | min = mas->max + 1; |
4553 | level = 0; |
4554 | do { |
4555 | if (ma_is_root(node)) |
4556 | goto overflow; |
4557 | |
4558 | /* Walk up. */ |
4559 | if (unlikely(mas_ascend(mas))) |
4560 | return 1; |
4561 | |
4562 | level++; |
4563 | node = mas_mn(mas); |
4564 | mt = mte_node_type(entry: mas->node); |
4565 | pivots = ma_pivots(node, type: mt); |
4566 | node_end = ma_data_end(node, type: mt, pivots, max: mas->max); |
4567 | if (unlikely(ma_dead_node(node))) |
4568 | return 1; |
4569 | |
4570 | } while (unlikely(mas->offset == node_end)); |
4571 | |
4572 | slots = ma_slots(mn: node, mt); |
4573 | mas->offset++; |
4574 | enode = mas_slot(mas, slots, offset: mas->offset); |
4575 | if (unlikely(ma_dead_node(node))) |
4576 | return 1; |
4577 | |
4578 | if (level > 1) |
4579 | mas->offset = 0; |
4580 | |
4581 | while (unlikely(level > 1)) { |
4582 | level--; |
4583 | mas->node = enode; |
4584 | node = mas_mn(mas); |
4585 | mt = mte_node_type(entry: mas->node); |
4586 | slots = ma_slots(mn: node, mt); |
4587 | enode = mas_slot(mas, slots, offset: 0); |
4588 | if (unlikely(ma_dead_node(node))) |
4589 | return 1; |
4590 | } |
4591 | |
4592 | if (!mas->offset) |
4593 | pivots = ma_pivots(node, type: mt); |
4594 | |
4595 | mas->max = mas_safe_pivot(mas, pivots, piv: mas->offset, type: mt); |
4596 | tmp = mte_to_node(entry: enode); |
4597 | mt = mte_node_type(entry: enode); |
4598 | pivots = ma_pivots(node: tmp, type: mt); |
4599 | mas->end = ma_data_end(node: tmp, type: mt, pivots, max: mas->max); |
4600 | if (unlikely(ma_dead_node(node))) |
4601 | return 1; |
4602 | |
4603 | mas->node = enode; |
4604 | mas->min = min; |
4605 | return 0; |
4606 | |
4607 | overflow: |
4608 | if (unlikely(ma_dead_node(node))) |
4609 | return 1; |
4610 | |
4611 | mas->status = ma_overflow; |
4612 | return 0; |
4613 | } |
4614 | |
4615 | /* |
4616 | * mas_next_slot() - Get the entry in the next slot |
4617 | * |
4618 | * @mas: The maple state |
4619 | * @max: The maximum starting range |
4620 | * @empty: Can be empty |
4621 | * @set_overflow: Should @mas->node be set to overflow when the limit is |
4622 | * reached. |
4623 | * |
4624 | * Return: The entry in the next slot which is possibly NULL |
4625 | */ |
4626 | static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) |
4627 | { |
4628 | void __rcu **slots; |
4629 | unsigned long *pivots; |
4630 | unsigned long pivot; |
4631 | enum maple_type type; |
4632 | struct maple_node *node; |
4633 | unsigned long save_point = mas->last; |
4634 | void *entry; |
4635 | |
4636 | retry: |
4637 | node = mas_mn(mas); |
4638 | type = mte_node_type(entry: mas->node); |
4639 | pivots = ma_pivots(node, type); |
4640 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4641 | goto retry; |
4642 | |
4643 | if (mas->max >= max) { |
4644 | if (likely(mas->offset < mas->end)) |
4645 | pivot = pivots[mas->offset]; |
4646 | else |
4647 | pivot = mas->max; |
4648 | |
4649 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4650 | goto retry; |
4651 | |
4652 | if (pivot >= max) { /* Was at the limit, next will extend beyond */ |
4653 | mas->status = ma_overflow; |
4654 | return NULL; |
4655 | } |
4656 | } |
4657 | |
4658 | if (likely(mas->offset < mas->end)) { |
4659 | mas->index = pivots[mas->offset] + 1; |
4660 | again: |
4661 | mas->offset++; |
4662 | if (likely(mas->offset < mas->end)) |
4663 | mas->last = pivots[mas->offset]; |
4664 | else |
4665 | mas->last = mas->max; |
4666 | } else { |
4667 | if (mas->last >= max) { |
4668 | mas->status = ma_overflow; |
4669 | return NULL; |
4670 | } |
4671 | |
4672 | if (mas_next_node(mas, node, max)) { |
4673 | mas_rewalk(mas, index: save_point); |
4674 | goto retry; |
4675 | } |
4676 | |
4677 | if (WARN_ON_ONCE(mas_is_overflow(mas))) |
4678 | return NULL; |
4679 | |
4680 | mas->offset = 0; |
4681 | mas->index = mas->min; |
4682 | node = mas_mn(mas); |
4683 | type = mte_node_type(entry: mas->node); |
4684 | pivots = ma_pivots(node, type); |
4685 | mas->last = pivots[0]; |
4686 | } |
4687 | |
4688 | slots = ma_slots(mn: node, mt: type); |
4689 | entry = mt_slot(mt: mas->tree, slots, offset: mas->offset); |
4690 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4691 | goto retry; |
4692 | |
4693 | if (entry) |
4694 | return entry; |
4695 | |
4696 | |
4697 | if (!empty) { |
4698 | if (mas->last >= max) { |
4699 | mas->status = ma_overflow; |
4700 | return NULL; |
4701 | } |
4702 | |
4703 | mas->index = mas->last + 1; |
4704 | goto again; |
4705 | } |
4706 | |
4707 | return entry; |
4708 | } |
4709 | |
4710 | /* |
4711 | * mas_next_entry() - Internal function to get the next entry. |
4712 | * @mas: The maple state |
4713 | * @limit: The maximum range start. |
4714 | * |
4715 | * Set the @mas->node to the next entry and the range_start to |
4716 | * the beginning value for the entry. Does not check beyond @limit. |
4717 | * Sets @mas->index and @mas->last to the range, Does not update @mas->index and |
4718 | * @mas->last on overflow. |
4719 | * Restarts on dead nodes. |
4720 | * |
4721 | * Return: the next entry or %NULL. |
4722 | */ |
4723 | static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) |
4724 | { |
4725 | if (mas->last >= limit) { |
4726 | mas->status = ma_overflow; |
4727 | return NULL; |
4728 | } |
4729 | |
4730 | return mas_next_slot(mas, max: limit, empty: false); |
4731 | } |
4732 | |
4733 | /* |
4734 | * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the |
4735 | * highest gap address of a given size in a given node and descend. |
4736 | * @mas: The maple state |
4737 | * @size: The needed size. |
4738 | * |
4739 | * Return: True if found in a leaf, false otherwise. |
4740 | * |
4741 | */ |
4742 | static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, |
4743 | unsigned long *gap_min, unsigned long *gap_max) |
4744 | { |
4745 | enum maple_type type = mte_node_type(entry: mas->node); |
4746 | struct maple_node *node = mas_mn(mas); |
4747 | unsigned long *pivots, *gaps; |
4748 | void __rcu **slots; |
4749 | unsigned long gap = 0; |
4750 | unsigned long max, min; |
4751 | unsigned char offset; |
4752 | |
4753 | if (unlikely(mas_is_err(mas))) |
4754 | return true; |
4755 | |
4756 | if (ma_is_dense(type)) { |
4757 | /* dense nodes. */ |
4758 | mas->offset = (unsigned char)(mas->index - mas->min); |
4759 | return true; |
4760 | } |
4761 | |
4762 | pivots = ma_pivots(node, type); |
4763 | slots = ma_slots(mn: node, mt: type); |
4764 | gaps = ma_gaps(node, type); |
4765 | offset = mas->offset; |
4766 | min = mas_safe_min(mas, pivots, offset); |
4767 | /* Skip out of bounds. */ |
4768 | while (mas->last < min) |
4769 | min = mas_safe_min(mas, pivots, offset: --offset); |
4770 | |
4771 | max = mas_safe_pivot(mas, pivots, piv: offset, type); |
4772 | while (mas->index <= max) { |
4773 | gap = 0; |
4774 | if (gaps) |
4775 | gap = gaps[offset]; |
4776 | else if (!mas_slot(mas, slots, offset)) |
4777 | gap = max - min + 1; |
4778 | |
4779 | if (gap) { |
4780 | if ((size <= gap) && (size <= mas->last - min + 1)) |
4781 | break; |
4782 | |
4783 | if (!gaps) { |
4784 | /* Skip the next slot, it cannot be a gap. */ |
4785 | if (offset < 2) |
4786 | goto ascend; |
4787 | |
4788 | offset -= 2; |
4789 | max = pivots[offset]; |
4790 | min = mas_safe_min(mas, pivots, offset); |
4791 | continue; |
4792 | } |
4793 | } |
4794 | |
4795 | if (!offset) |
4796 | goto ascend; |
4797 | |
4798 | offset--; |
4799 | max = min - 1; |
4800 | min = mas_safe_min(mas, pivots, offset); |
4801 | } |
4802 | |
4803 | if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) |
4804 | goto no_space; |
4805 | |
4806 | if (unlikely(ma_is_leaf(type))) { |
4807 | mas->offset = offset; |
4808 | *gap_min = min; |
4809 | *gap_max = min + gap - 1; |
4810 | return true; |
4811 | } |
4812 | |
4813 | /* descend, only happens under lock. */ |
4814 | mas->node = mas_slot(mas, slots, offset); |
4815 | mas->min = min; |
4816 | mas->max = max; |
4817 | mas->offset = mas_data_end(mas); |
4818 | return false; |
4819 | |
4820 | ascend: |
4821 | if (!mte_is_root(node: mas->node)) |
4822 | return false; |
4823 | |
4824 | no_space: |
4825 | mas_set_err(mas, err: -EBUSY); |
4826 | return false; |
4827 | } |
4828 | |
4829 | static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) |
4830 | { |
4831 | enum maple_type type = mte_node_type(entry: mas->node); |
4832 | unsigned long pivot, min, gap = 0; |
4833 | unsigned char offset, data_end; |
4834 | unsigned long *gaps, *pivots; |
4835 | void __rcu **slots; |
4836 | struct maple_node *node; |
4837 | bool found = false; |
4838 | |
4839 | if (ma_is_dense(type)) { |
4840 | mas->offset = (unsigned char)(mas->index - mas->min); |
4841 | return true; |
4842 | } |
4843 | |
4844 | node = mas_mn(mas); |
4845 | pivots = ma_pivots(node, type); |
4846 | slots = ma_slots(mn: node, mt: type); |
4847 | gaps = ma_gaps(node, type); |
4848 | offset = mas->offset; |
4849 | min = mas_safe_min(mas, pivots, offset); |
4850 | data_end = ma_data_end(node, type, pivots, max: mas->max); |
4851 | for (; offset <= data_end; offset++) { |
4852 | pivot = mas_safe_pivot(mas, pivots, piv: offset, type); |
4853 | |
4854 | /* Not within lower bounds */ |
4855 | if (mas->index > pivot) |
4856 | goto next_slot; |
4857 | |
4858 | if (gaps) |
4859 | gap = gaps[offset]; |
4860 | else if (!mas_slot(mas, slots, offset)) |
4861 | gap = min(pivot, mas->last) - max(mas->index, min) + 1; |
4862 | else |
4863 | goto next_slot; |
4864 | |
4865 | if (gap >= size) { |
4866 | if (ma_is_leaf(type)) { |
4867 | found = true; |
4868 | goto done; |
4869 | } |
4870 | if (mas->index <= pivot) { |
4871 | mas->node = mas_slot(mas, slots, offset); |
4872 | mas->min = min; |
4873 | mas->max = pivot; |
4874 | offset = 0; |
4875 | break; |
4876 | } |
4877 | } |
4878 | next_slot: |
4879 | min = pivot + 1; |
4880 | if (mas->last <= pivot) { |
4881 | mas_set_err(mas, err: -EBUSY); |
4882 | return true; |
4883 | } |
4884 | } |
4885 | |
4886 | if (mte_is_root(node: mas->node)) |
4887 | found = true; |
4888 | done: |
4889 | mas->offset = offset; |
4890 | return found; |
4891 | } |
4892 | |
4893 | /** |
4894 | * mas_walk() - Search for @mas->index in the tree. |
4895 | * @mas: The maple state. |
4896 | * |
4897 | * mas->index and mas->last will be set to the range if there is a value. If |
4898 | * mas->status is ma_none, reset to ma_start |
4899 | * |
4900 | * Return: the entry at the location or %NULL. |
4901 | */ |
4902 | void *mas_walk(struct ma_state *mas) |
4903 | { |
4904 | void *entry; |
4905 | |
4906 | if (!mas_is_active(mas) || !mas_is_start(mas)) |
4907 | mas->status = ma_start; |
4908 | retry: |
4909 | entry = mas_state_walk(mas); |
4910 | if (mas_is_start(mas)) { |
4911 | goto retry; |
4912 | } else if (mas_is_none(mas)) { |
4913 | mas->index = 0; |
4914 | mas->last = ULONG_MAX; |
4915 | } else if (mas_is_ptr(mas)) { |
4916 | if (!mas->index) { |
4917 | mas->last = 0; |
4918 | return entry; |
4919 | } |
4920 | |
4921 | mas->index = 1; |
4922 | mas->last = ULONG_MAX; |
4923 | mas->status = ma_none; |
4924 | return NULL; |
4925 | } |
4926 | |
4927 | return entry; |
4928 | } |
4929 | EXPORT_SYMBOL_GPL(mas_walk); |
4930 | |
4931 | static inline bool mas_rewind_node(struct ma_state *mas) |
4932 | { |
4933 | unsigned char slot; |
4934 | |
4935 | do { |
4936 | if (mte_is_root(node: mas->node)) { |
4937 | slot = mas->offset; |
4938 | if (!slot) |
4939 | return false; |
4940 | } else { |
4941 | mas_ascend(mas); |
4942 | slot = mas->offset; |
4943 | } |
4944 | } while (!slot); |
4945 | |
4946 | mas->offset = --slot; |
4947 | return true; |
4948 | } |
4949 | |
4950 | /* |
4951 | * mas_skip_node() - Internal function. Skip over a node. |
4952 | * @mas: The maple state. |
4953 | * |
4954 | * Return: true if there is another node, false otherwise. |
4955 | */ |
4956 | static inline bool mas_skip_node(struct ma_state *mas) |
4957 | { |
4958 | if (mas_is_err(mas)) |
4959 | return false; |
4960 | |
4961 | do { |
4962 | if (mte_is_root(node: mas->node)) { |
4963 | if (mas->offset >= mas_data_end(mas)) { |
4964 | mas_set_err(mas, err: -EBUSY); |
4965 | return false; |
4966 | } |
4967 | } else { |
4968 | mas_ascend(mas); |
4969 | } |
4970 | } while (mas->offset >= mas_data_end(mas)); |
4971 | |
4972 | mas->offset++; |
4973 | return true; |
4974 | } |
4975 | |
4976 | /* |
4977 | * mas_awalk() - Allocation walk. Search from low address to high, for a gap of |
4978 | * @size |
4979 | * @mas: The maple state |
4980 | * @size: The size of the gap required |
4981 | * |
4982 | * Search between @mas->index and @mas->last for a gap of @size. |
4983 | */ |
4984 | static inline void mas_awalk(struct ma_state *mas, unsigned long size) |
4985 | { |
4986 | struct maple_enode *last = NULL; |
4987 | |
4988 | /* |
4989 | * There are 4 options: |
4990 | * go to child (descend) |
4991 | * go back to parent (ascend) |
4992 | * no gap found. (return, slot == MAPLE_NODE_SLOTS) |
4993 | * found the gap. (return, slot != MAPLE_NODE_SLOTS) |
4994 | */ |
4995 | while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { |
4996 | if (last == mas->node) |
4997 | mas_skip_node(mas); |
4998 | else |
4999 | last = mas->node; |
5000 | } |
5001 | } |
5002 | |
5003 | /* |
5004 | * mas_sparse_area() - Internal function. Return upper or lower limit when |
5005 | * searching for a gap in an empty tree. |
5006 | * @mas: The maple state |
5007 | * @min: the minimum range |
5008 | * @max: The maximum range |
5009 | * @size: The size of the gap |
5010 | * @fwd: Searching forward or back |
5011 | */ |
5012 | static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, |
5013 | unsigned long max, unsigned long size, bool fwd) |
5014 | { |
5015 | if (!unlikely(mas_is_none(mas)) && min == 0) { |
5016 | min++; |
5017 | /* |
5018 | * At this time, min is increased, we need to recheck whether |
5019 | * the size is satisfied. |
5020 | */ |
5021 | if (min > max || max - min + 1 < size) |
5022 | return -EBUSY; |
5023 | } |
5024 | /* mas_is_ptr */ |
5025 | |
5026 | if (fwd) { |
5027 | mas->index = min; |
5028 | mas->last = min + size - 1; |
5029 | } else { |
5030 | mas->last = max; |
5031 | mas->index = max - size + 1; |
5032 | } |
5033 | return 0; |
5034 | } |
5035 | |
5036 | /* |
5037 | * mas_empty_area() - Get the lowest address within the range that is |
5038 | * sufficient for the size requested. |
5039 | * @mas: The maple state |
5040 | * @min: The lowest value of the range |
5041 | * @max: The highest value of the range |
5042 | * @size: The size needed |
5043 | */ |
5044 | int mas_empty_area(struct ma_state *mas, unsigned long min, |
5045 | unsigned long max, unsigned long size) |
5046 | { |
5047 | unsigned char offset; |
5048 | unsigned long *pivots; |
5049 | enum maple_type mt; |
5050 | struct maple_node *node; |
5051 | |
5052 | if (min > max) |
5053 | return -EINVAL; |
5054 | |
5055 | if (size == 0 || max - min < size - 1) |
5056 | return -EINVAL; |
5057 | |
5058 | if (mas_is_start(mas)) |
5059 | mas_start(mas); |
5060 | else if (mas->offset >= 2) |
5061 | mas->offset -= 2; |
5062 | else if (!mas_skip_node(mas)) |
5063 | return -EBUSY; |
5064 | |
5065 | /* Empty set */ |
5066 | if (mas_is_none(mas) || mas_is_ptr(mas)) |
5067 | return mas_sparse_area(mas, min, max, size, fwd: true); |
5068 | |
5069 | /* The start of the window can only be within these values */ |
5070 | mas->index = min; |
5071 | mas->last = max; |
5072 | mas_awalk(mas, size); |
5073 | |
5074 | if (unlikely(mas_is_err(mas))) |
5075 | return xa_err(entry: mas->node); |
5076 | |
5077 | offset = mas->offset; |
5078 | if (unlikely(offset == MAPLE_NODE_SLOTS)) |
5079 | return -EBUSY; |
5080 | |
5081 | node = mas_mn(mas); |
5082 | mt = mte_node_type(entry: mas->node); |
5083 | pivots = ma_pivots(node, type: mt); |
5084 | min = mas_safe_min(mas, pivots, offset); |
5085 | if (mas->index < min) |
5086 | mas->index = min; |
5087 | mas->last = mas->index + size - 1; |
5088 | mas->end = ma_data_end(node, type: mt, pivots, max: mas->max); |
5089 | return 0; |
5090 | } |
5091 | EXPORT_SYMBOL_GPL(mas_empty_area); |
5092 | |
5093 | /* |
5094 | * mas_empty_area_rev() - Get the highest address within the range that is |
5095 | * sufficient for the size requested. |
5096 | * @mas: The maple state |
5097 | * @min: The lowest value of the range |
5098 | * @max: The highest value of the range |
5099 | * @size: The size needed |
5100 | */ |
5101 | int mas_empty_area_rev(struct ma_state *mas, unsigned long min, |
5102 | unsigned long max, unsigned long size) |
5103 | { |
5104 | struct maple_enode *last = mas->node; |
5105 | |
5106 | if (min > max) |
5107 | return -EINVAL; |
5108 | |
5109 | if (size == 0 || max - min < size - 1) |
5110 | return -EINVAL; |
5111 | |
5112 | if (mas_is_start(mas)) { |
5113 | mas_start(mas); |
5114 | mas->offset = mas_data_end(mas); |
5115 | } else if (mas->offset >= 2) { |
5116 | mas->offset -= 2; |
5117 | } else if (!mas_rewind_node(mas)) { |
5118 | return -EBUSY; |
5119 | } |
5120 | |
5121 | /* Empty set. */ |
5122 | if (mas_is_none(mas) || mas_is_ptr(mas)) |
5123 | return mas_sparse_area(mas, min, max, size, fwd: false); |
5124 | |
5125 | /* The start of the window can only be within these values. */ |
5126 | mas->index = min; |
5127 | mas->last = max; |
5128 | |
5129 | while (!mas_rev_awalk(mas, size, gap_min: &min, gap_max: &max)) { |
5130 | if (last == mas->node) { |
5131 | if (!mas_rewind_node(mas)) |
5132 | return -EBUSY; |
5133 | } else { |
5134 | last = mas->node; |
5135 | } |
5136 | } |
5137 | |
5138 | if (mas_is_err(mas)) |
5139 | return xa_err(entry: mas->node); |
5140 | |
5141 | if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) |
5142 | return -EBUSY; |
5143 | |
5144 | /* Trim the upper limit to the max. */ |
5145 | if (max < mas->last) |
5146 | mas->last = max; |
5147 | |
5148 | mas->index = mas->last - size + 1; |
5149 | mas->end = mas_data_end(mas); |
5150 | return 0; |
5151 | } |
5152 | EXPORT_SYMBOL_GPL(mas_empty_area_rev); |
5153 | |
5154 | /* |
5155 | * mte_dead_leaves() - Mark all leaves of a node as dead. |
5156 | * @mas: The maple state |
5157 | * @slots: Pointer to the slot array |
5158 | * @type: The maple node type |
5159 | * |
5160 | * Must hold the write lock. |
5161 | * |
5162 | * Return: The number of leaves marked as dead. |
5163 | */ |
5164 | static inline |
5165 | unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt, |
5166 | void __rcu **slots) |
5167 | { |
5168 | struct maple_node *node; |
5169 | enum maple_type type; |
5170 | void *entry; |
5171 | int offset; |
5172 | |
5173 | for (offset = 0; offset < mt_slot_count(enode); offset++) { |
5174 | entry = mt_slot(mt, slots, offset); |
5175 | type = mte_node_type(entry); |
5176 | node = mte_to_node(entry); |
5177 | /* Use both node and type to catch LE & BE metadata */ |
5178 | if (!node || !type) |
5179 | break; |
5180 | |
5181 | mte_set_node_dead(mn: entry); |
5182 | node->type = type; |
5183 | rcu_assign_pointer(slots[offset], node); |
5184 | } |
5185 | |
5186 | return offset; |
5187 | } |
5188 | |
5189 | /** |
5190 | * mte_dead_walk() - Walk down a dead tree to just before the leaves |
5191 | * @enode: The maple encoded node |
5192 | * @offset: The starting offset |
5193 | * |
5194 | * Note: This can only be used from the RCU callback context. |
5195 | */ |
5196 | static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset) |
5197 | { |
5198 | struct maple_node *node, *next; |
5199 | void __rcu **slots = NULL; |
5200 | |
5201 | next = mte_to_node(entry: *enode); |
5202 | do { |
5203 | *enode = ma_enode_ptr(next); |
5204 | node = mte_to_node(entry: *enode); |
5205 | slots = ma_slots(mn: node, mt: node->type); |
5206 | next = rcu_dereference_protected(slots[offset], |
5207 | lock_is_held(&rcu_callback_map)); |
5208 | offset = 0; |
5209 | } while (!ma_is_leaf(type: next->type)); |
5210 | |
5211 | return slots; |
5212 | } |
5213 | |
5214 | /** |
5215 | * mt_free_walk() - Walk & free a tree in the RCU callback context |
5216 | * @head: The RCU head that's within the node. |
5217 | * |
5218 | * Note: This can only be used from the RCU callback context. |
5219 | */ |
5220 | static void mt_free_walk(struct rcu_head *head) |
5221 | { |
5222 | void __rcu **slots; |
5223 | struct maple_node *node, *start; |
5224 | struct maple_enode *enode; |
5225 | unsigned char offset; |
5226 | enum maple_type type; |
5227 | |
5228 | node = container_of(head, struct maple_node, rcu); |
5229 | |
5230 | if (ma_is_leaf(type: node->type)) |
5231 | goto free_leaf; |
5232 | |
5233 | start = node; |
5234 | enode = mt_mk_node(node, type: node->type); |
5235 | slots = mte_dead_walk(enode: &enode, offset: 0); |
5236 | node = mte_to_node(entry: enode); |
5237 | do { |
5238 | mt_free_bulk(size: node->slot_len, nodes: slots); |
5239 | offset = node->parent_slot + 1; |
5240 | enode = node->piv_parent; |
5241 | if (mte_to_node(entry: enode) == node) |
5242 | goto free_leaf; |
5243 | |
5244 | type = mte_node_type(entry: enode); |
5245 | slots = ma_slots(mn: mte_to_node(entry: enode), mt: type); |
5246 | if ((offset < mt_slots[type]) && |
5247 | rcu_dereference_protected(slots[offset], |
5248 | lock_is_held(&rcu_callback_map))) |
5249 | slots = mte_dead_walk(enode: &enode, offset); |
5250 | node = mte_to_node(entry: enode); |
5251 | } while ((node != start) || (node->slot_len < offset)); |
5252 | |
5253 | slots = ma_slots(mn: node, mt: node->type); |
5254 | mt_free_bulk(size: node->slot_len, nodes: slots); |
5255 | |
5256 | free_leaf: |
5257 | mt_free_rcu(head: &node->rcu); |
5258 | } |
5259 | |
5260 | static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, |
5261 | struct maple_tree *mt, struct maple_enode *prev, unsigned char offset) |
5262 | { |
5263 | struct maple_node *node; |
5264 | struct maple_enode *next = *enode; |
5265 | void __rcu **slots = NULL; |
5266 | enum maple_type type; |
5267 | unsigned char next_offset = 0; |
5268 | |
5269 | do { |
5270 | *enode = next; |
5271 | node = mte_to_node(entry: *enode); |
5272 | type = mte_node_type(entry: *enode); |
5273 | slots = ma_slots(mn: node, mt: type); |
5274 | next = mt_slot_locked(mt, slots, offset: next_offset); |
5275 | if ((mte_dead_node(enode: next))) |
5276 | next = mt_slot_locked(mt, slots, offset: ++next_offset); |
5277 | |
5278 | mte_set_node_dead(mn: *enode); |
5279 | node->type = type; |
5280 | node->piv_parent = prev; |
5281 | node->parent_slot = offset; |
5282 | offset = next_offset; |
5283 | next_offset = 0; |
5284 | prev = *enode; |
5285 | } while (!mte_is_leaf(entry: next)); |
5286 | |
5287 | return slots; |
5288 | } |
5289 | |
5290 | static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, |
5291 | bool free) |
5292 | { |
5293 | void __rcu **slots; |
5294 | struct maple_node *node = mte_to_node(entry: enode); |
5295 | struct maple_enode *start; |
5296 | |
5297 | if (mte_is_leaf(entry: enode)) { |
5298 | node->type = mte_node_type(entry: enode); |
5299 | goto free_leaf; |
5300 | } |
5301 | |
5302 | start = enode; |
5303 | slots = mte_destroy_descend(enode: &enode, mt, prev: start, offset: 0); |
5304 | node = mte_to_node(entry: enode); // Updated in the above call. |
5305 | do { |
5306 | enum maple_type type; |
5307 | unsigned char offset; |
5308 | struct maple_enode *parent, *tmp; |
5309 | |
5310 | node->slot_len = mte_dead_leaves(enode, mt, slots); |
5311 | if (free) |
5312 | mt_free_bulk(size: node->slot_len, nodes: slots); |
5313 | offset = node->parent_slot + 1; |
5314 | enode = node->piv_parent; |
5315 | if (mte_to_node(entry: enode) == node) |
5316 | goto free_leaf; |
5317 | |
5318 | type = mte_node_type(entry: enode); |
5319 | slots = ma_slots(mn: mte_to_node(entry: enode), mt: type); |
5320 | if (offset >= mt_slots[type]) |
5321 | goto next; |
5322 | |
5323 | tmp = mt_slot_locked(mt, slots, offset); |
5324 | if (mte_node_type(entry: tmp) && mte_to_node(entry: tmp)) { |
5325 | parent = enode; |
5326 | enode = tmp; |
5327 | slots = mte_destroy_descend(enode: &enode, mt, prev: parent, offset); |
5328 | } |
5329 | next: |
5330 | node = mte_to_node(entry: enode); |
5331 | } while (start != enode); |
5332 | |
5333 | node = mte_to_node(entry: enode); |
5334 | node->slot_len = mte_dead_leaves(enode, mt, slots); |
5335 | if (free) |
5336 | mt_free_bulk(size: node->slot_len, nodes: slots); |
5337 | |
5338 | free_leaf: |
5339 | if (free) |
5340 | mt_free_rcu(head: &node->rcu); |
5341 | else |
5342 | mt_clear_meta(mt, mn: node, type: node->type); |
5343 | } |
5344 | |
5345 | /* |
5346 | * mte_destroy_walk() - Free a tree or sub-tree. |
5347 | * @enode: the encoded maple node (maple_enode) to start |
5348 | * @mt: the tree to free - needed for node types. |
5349 | * |
5350 | * Must hold the write lock. |
5351 | */ |
5352 | static inline void mte_destroy_walk(struct maple_enode *enode, |
5353 | struct maple_tree *mt) |
5354 | { |
5355 | struct maple_node *node = mte_to_node(entry: enode); |
5356 | |
5357 | if (mt_in_rcu(mt)) { |
5358 | mt_destroy_walk(enode, mt, free: false); |
5359 | call_rcu(head: &node->rcu, func: mt_free_walk); |
5360 | } else { |
5361 | mt_destroy_walk(enode, mt, free: true); |
5362 | } |
5363 | } |
5364 | |
5365 | static void mas_wr_store_setup(struct ma_wr_state *wr_mas) |
5366 | { |
5367 | if (!mas_is_active(mas: wr_mas->mas)) { |
5368 | if (mas_is_start(mas: wr_mas->mas)) |
5369 | return; |
5370 | |
5371 | if (unlikely(mas_is_paused(wr_mas->mas))) |
5372 | goto reset; |
5373 | |
5374 | if (unlikely(mas_is_none(wr_mas->mas))) |
5375 | goto reset; |
5376 | |
5377 | if (unlikely(mas_is_overflow(wr_mas->mas))) |
5378 | goto reset; |
5379 | |
5380 | if (unlikely(mas_is_underflow(wr_mas->mas))) |
5381 | goto reset; |
5382 | } |
5383 | |
5384 | /* |
5385 | * A less strict version of mas_is_span_wr() where we allow spanning |
5386 | * writes within this node. This is to stop partial walks in |
5387 | * mas_prealloc() from being reset. |
5388 | */ |
5389 | if (wr_mas->mas->last > wr_mas->mas->max) |
5390 | goto reset; |
5391 | |
5392 | if (wr_mas->entry) |
5393 | return; |
5394 | |
5395 | if (mte_is_leaf(entry: wr_mas->mas->node) && |
5396 | wr_mas->mas->last == wr_mas->mas->max) |
5397 | goto reset; |
5398 | |
5399 | return; |
5400 | |
5401 | reset: |
5402 | mas_reset(mas: wr_mas->mas); |
5403 | } |
5404 | |
5405 | /* Interface */ |
5406 | |
5407 | /** |
5408 | * mas_store() - Store an @entry. |
5409 | * @mas: The maple state. |
5410 | * @entry: The entry to store. |
5411 | * |
5412 | * The @mas->index and @mas->last is used to set the range for the @entry. |
5413 | * Note: The @mas should have pre-allocated entries to ensure there is memory to |
5414 | * store the entry. Please see mas_expected_entries()/mas_destroy() for more details. |
5415 | * |
5416 | * Return: the first entry between mas->index and mas->last or %NULL. |
5417 | */ |
5418 | void *mas_store(struct ma_state *mas, void *entry) |
5419 | { |
5420 | MA_WR_STATE(wr_mas, mas, entry); |
5421 | |
5422 | trace_ma_write(fn: __func__, mas, piv: 0, val: entry); |
5423 | #ifdef CONFIG_DEBUG_MAPLE_TREE |
5424 | if (MAS_WARN_ON(mas, mas->index > mas->last)) |
5425 | pr_err("Error %lX > %lX %p\n" , mas->index, mas->last, entry); |
5426 | |
5427 | if (mas->index > mas->last) { |
5428 | mas_set_err(mas, err: -EINVAL); |
5429 | return NULL; |
5430 | } |
5431 | |
5432 | #endif |
5433 | |
5434 | /* |
5435 | * Storing is the same operation as insert with the added caveat that it |
5436 | * can overwrite entries. Although this seems simple enough, one may |
5437 | * want to examine what happens if a single store operation was to |
5438 | * overwrite multiple entries within a self-balancing B-Tree. |
5439 | */ |
5440 | mas_wr_store_setup(wr_mas: &wr_mas); |
5441 | mas_wr_store_entry(wr_mas: &wr_mas); |
5442 | return wr_mas.content; |
5443 | } |
5444 | EXPORT_SYMBOL_GPL(mas_store); |
5445 | |
5446 | /** |
5447 | * mas_store_gfp() - Store a value into the tree. |
5448 | * @mas: The maple state |
5449 | * @entry: The entry to store |
5450 | * @gfp: The GFP_FLAGS to use for allocations if necessary. |
5451 | * |
5452 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not |
5453 | * be allocated. |
5454 | */ |
5455 | int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) |
5456 | { |
5457 | MA_WR_STATE(wr_mas, mas, entry); |
5458 | |
5459 | mas_wr_store_setup(wr_mas: &wr_mas); |
5460 | trace_ma_write(fn: __func__, mas, piv: 0, val: entry); |
5461 | retry: |
5462 | mas_wr_store_entry(wr_mas: &wr_mas); |
5463 | if (unlikely(mas_nomem(mas, gfp))) |
5464 | goto retry; |
5465 | |
5466 | if (unlikely(mas_is_err(mas))) |
5467 | return xa_err(entry: mas->node); |
5468 | |
5469 | return 0; |
5470 | } |
5471 | EXPORT_SYMBOL_GPL(mas_store_gfp); |
5472 | |
5473 | /** |
5474 | * mas_store_prealloc() - Store a value into the tree using memory |
5475 | * preallocated in the maple state. |
5476 | * @mas: The maple state |
5477 | * @entry: The entry to store. |
5478 | */ |
5479 | void mas_store_prealloc(struct ma_state *mas, void *entry) |
5480 | { |
5481 | MA_WR_STATE(wr_mas, mas, entry); |
5482 | |
5483 | mas_wr_store_setup(wr_mas: &wr_mas); |
5484 | trace_ma_write(fn: __func__, mas, piv: 0, val: entry); |
5485 | mas_wr_store_entry(wr_mas: &wr_mas); |
5486 | MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); |
5487 | mas_destroy(mas); |
5488 | } |
5489 | EXPORT_SYMBOL_GPL(mas_store_prealloc); |
5490 | |
5491 | /** |
5492 | * mas_preallocate() - Preallocate enough nodes for a store operation |
5493 | * @mas: The maple state |
5494 | * @entry: The entry that will be stored |
5495 | * @gfp: The GFP_FLAGS to use for allocations. |
5496 | * |
5497 | * Return: 0 on success, -ENOMEM if memory could not be allocated. |
5498 | */ |
5499 | int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) |
5500 | { |
5501 | MA_WR_STATE(wr_mas, mas, entry); |
5502 | unsigned char node_size; |
5503 | int request = 1; |
5504 | int ret; |
5505 | |
5506 | |
5507 | if (unlikely(!mas->index && mas->last == ULONG_MAX)) |
5508 | goto ask_now; |
5509 | |
5510 | mas_wr_store_setup(wr_mas: &wr_mas); |
5511 | wr_mas.content = mas_start(mas); |
5512 | /* Root expand */ |
5513 | if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) |
5514 | goto ask_now; |
5515 | |
5516 | if (unlikely(!mas_wr_walk(&wr_mas))) { |
5517 | /* Spanning store, use worst case for now */ |
5518 | request = 1 + mas_mt_height(mas) * 3; |
5519 | goto ask_now; |
5520 | } |
5521 | |
5522 | /* At this point, we are at the leaf node that needs to be altered. */ |
5523 | /* Exact fit, no nodes needed. */ |
5524 | if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last) |
5525 | return 0; |
5526 | |
5527 | mas_wr_end_piv(wr_mas: &wr_mas); |
5528 | node_size = mas_wr_new_end(wr_mas: &wr_mas); |
5529 | |
5530 | /* Slot store, does not require additional nodes */ |
5531 | if (node_size == mas->end) { |
5532 | /* reuse node */ |
5533 | if (!mt_in_rcu(mt: mas->tree)) |
5534 | return 0; |
5535 | /* shifting boundary */ |
5536 | if (wr_mas.offset_end - mas->offset == 1) |
5537 | return 0; |
5538 | } |
5539 | |
5540 | if (node_size >= mt_slots[wr_mas.type]) { |
5541 | /* Split, worst case for now. */ |
5542 | request = 1 + mas_mt_height(mas) * 2; |
5543 | goto ask_now; |
5544 | } |
5545 | |
5546 | /* New root needs a single node */ |
5547 | if (unlikely(mte_is_root(mas->node))) |
5548 | goto ask_now; |
5549 | |
5550 | /* Potential spanning rebalance collapsing a node, use worst-case */ |
5551 | if (node_size - 1 <= mt_min_slots[wr_mas.type]) |
5552 | request = mas_mt_height(mas) * 2 - 1; |
5553 | |
5554 | /* node store, slot store needs one node */ |
5555 | ask_now: |
5556 | mas_node_count_gfp(mas, count: request, gfp); |
5557 | mas->mas_flags |= MA_STATE_PREALLOC; |
5558 | if (likely(!mas_is_err(mas))) |
5559 | return 0; |
5560 | |
5561 | mas_set_alloc_req(mas, count: 0); |
5562 | ret = xa_err(entry: mas->node); |
5563 | mas_reset(mas); |
5564 | mas_destroy(mas); |
5565 | mas_reset(mas); |
5566 | return ret; |
5567 | } |
5568 | EXPORT_SYMBOL_GPL(mas_preallocate); |
5569 | |
5570 | /* |
5571 | * mas_destroy() - destroy a maple state. |
5572 | * @mas: The maple state |
5573 | * |
5574 | * Upon completion, check the left-most node and rebalance against the node to |
5575 | * the right if necessary. Frees any allocated nodes associated with this maple |
5576 | * state. |
5577 | */ |
5578 | void mas_destroy(struct ma_state *mas) |
5579 | { |
5580 | struct maple_alloc *node; |
5581 | unsigned long total; |
5582 | |
5583 | /* |
5584 | * When using mas_for_each() to insert an expected number of elements, |
5585 | * it is possible that the number inserted is less than the expected |
5586 | * number. To fix an invalid final node, a check is performed here to |
5587 | * rebalance the previous node with the final node. |
5588 | */ |
5589 | if (mas->mas_flags & MA_STATE_REBALANCE) { |
5590 | unsigned char end; |
5591 | |
5592 | mas_start(mas); |
5593 | mtree_range_walk(mas); |
5594 | end = mas->end + 1; |
5595 | if (end < mt_min_slot_count(mas->node) - 1) |
5596 | mas_destroy_rebalance(mas, end); |
5597 | |
5598 | mas->mas_flags &= ~MA_STATE_REBALANCE; |
5599 | } |
5600 | mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); |
5601 | |
5602 | total = mas_allocated(mas); |
5603 | while (total) { |
5604 | node = mas->alloc; |
5605 | mas->alloc = node->slot[0]; |
5606 | if (node->node_count > 1) { |
5607 | size_t count = node->node_count - 1; |
5608 | |
5609 | mt_free_bulk(size: count, nodes: (void __rcu **)&node->slot[1]); |
5610 | total -= count; |
5611 | } |
5612 | mt_free_one(ma_mnode_ptr(node)); |
5613 | total--; |
5614 | } |
5615 | |
5616 | mas->alloc = NULL; |
5617 | } |
5618 | EXPORT_SYMBOL_GPL(mas_destroy); |
5619 | |
5620 | /* |
5621 | * mas_expected_entries() - Set the expected number of entries that will be inserted. |
5622 | * @mas: The maple state |
5623 | * @nr_entries: The number of expected entries. |
5624 | * |
5625 | * This will attempt to pre-allocate enough nodes to store the expected number |
5626 | * of entries. The allocations will occur using the bulk allocator interface |
5627 | * for speed. Please call mas_destroy() on the @mas after inserting the entries |
5628 | * to ensure any unused nodes are freed. |
5629 | * |
5630 | * Return: 0 on success, -ENOMEM if memory could not be allocated. |
5631 | */ |
5632 | int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) |
5633 | { |
5634 | int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; |
5635 | struct maple_enode *enode = mas->node; |
5636 | int nr_nodes; |
5637 | int ret; |
5638 | |
5639 | /* |
5640 | * Sometimes it is necessary to duplicate a tree to a new tree, such as |
5641 | * forking a process and duplicating the VMAs from one tree to a new |
5642 | * tree. When such a situation arises, it is known that the new tree is |
5643 | * not going to be used until the entire tree is populated. For |
5644 | * performance reasons, it is best to use a bulk load with RCU disabled. |
5645 | * This allows for optimistic splitting that favours the left and reuse |
5646 | * of nodes during the operation. |
5647 | */ |
5648 | |
5649 | /* Optimize splitting for bulk insert in-order */ |
5650 | mas->mas_flags |= MA_STATE_BULK; |
5651 | |
5652 | /* |
5653 | * Avoid overflow, assume a gap between each entry and a trailing null. |
5654 | * If this is wrong, it just means allocation can happen during |
5655 | * insertion of entries. |
5656 | */ |
5657 | nr_nodes = max(nr_entries, nr_entries * 2 + 1); |
5658 | if (!mt_is_alloc(mt: mas->tree)) |
5659 | nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; |
5660 | |
5661 | /* Leaves; reduce slots to keep space for expansion */ |
5662 | nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); |
5663 | /* Internal nodes */ |
5664 | nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); |
5665 | /* Add working room for split (2 nodes) + new parents */ |
5666 | mas_node_count_gfp(mas, count: nr_nodes + 3, GFP_KERNEL); |
5667 | |
5668 | /* Detect if allocations run out */ |
5669 | mas->mas_flags |= MA_STATE_PREALLOC; |
5670 | |
5671 | if (!mas_is_err(mas)) |
5672 | return 0; |
5673 | |
5674 | ret = xa_err(entry: mas->node); |
5675 | mas->node = enode; |
5676 | mas_destroy(mas); |
5677 | return ret; |
5678 | |
5679 | } |
5680 | EXPORT_SYMBOL_GPL(mas_expected_entries); |
5681 | |
5682 | static bool mas_next_setup(struct ma_state *mas, unsigned long max, |
5683 | void **entry) |
5684 | { |
5685 | bool was_none = mas_is_none(mas); |
5686 | |
5687 | if (unlikely(mas->last >= max)) { |
5688 | mas->status = ma_overflow; |
5689 | return true; |
5690 | } |
5691 | |
5692 | switch (mas->status) { |
5693 | case ma_active: |
5694 | return false; |
5695 | case ma_none: |
5696 | fallthrough; |
5697 | case ma_pause: |
5698 | mas->status = ma_start; |
5699 | fallthrough; |
5700 | case ma_start: |
5701 | mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ |
5702 | break; |
5703 | case ma_overflow: |
5704 | /* Overflowed before, but the max changed */ |
5705 | mas->status = ma_active; |
5706 | break; |
5707 | case ma_underflow: |
5708 | /* The user expects the mas to be one before where it is */ |
5709 | mas->status = ma_active; |
5710 | *entry = mas_walk(mas); |
5711 | if (*entry) |
5712 | return true; |
5713 | break; |
5714 | case ma_root: |
5715 | break; |
5716 | case ma_error: |
5717 | return true; |
5718 | } |
5719 | |
5720 | if (likely(mas_is_active(mas))) /* Fast path */ |
5721 | return false; |
5722 | |
5723 | if (mas_is_ptr(mas)) { |
5724 | *entry = NULL; |
5725 | if (was_none && mas->index == 0) { |
5726 | mas->index = mas->last = 0; |
5727 | return true; |
5728 | } |
5729 | mas->index = 1; |
5730 | mas->last = ULONG_MAX; |
5731 | mas->status = ma_none; |
5732 | return true; |
5733 | } |
5734 | |
5735 | if (mas_is_none(mas)) |
5736 | return true; |
5737 | |
5738 | return false; |
5739 | } |
5740 | |
5741 | /** |
5742 | * mas_next() - Get the next entry. |
5743 | * @mas: The maple state |
5744 | * @max: The maximum index to check. |
5745 | * |
5746 | * Returns the next entry after @mas->index. |
5747 | * Must hold rcu_read_lock or the write lock. |
5748 | * Can return the zero entry. |
5749 | * |
5750 | * Return: The next entry or %NULL |
5751 | */ |
5752 | void *mas_next(struct ma_state *mas, unsigned long max) |
5753 | { |
5754 | void *entry = NULL; |
5755 | |
5756 | if (mas_next_setup(mas, max, entry: &entry)) |
5757 | return entry; |
5758 | |
5759 | /* Retries on dead nodes handled by mas_next_slot */ |
5760 | return mas_next_slot(mas, max, empty: false); |
5761 | } |
5762 | EXPORT_SYMBOL_GPL(mas_next); |
5763 | |
5764 | /** |
5765 | * mas_next_range() - Advance the maple state to the next range |
5766 | * @mas: The maple state |
5767 | * @max: The maximum index to check. |
5768 | * |
5769 | * Sets @mas->index and @mas->last to the range. |
5770 | * Must hold rcu_read_lock or the write lock. |
5771 | * Can return the zero entry. |
5772 | * |
5773 | * Return: The next entry or %NULL |
5774 | */ |
5775 | void *mas_next_range(struct ma_state *mas, unsigned long max) |
5776 | { |
5777 | void *entry = NULL; |
5778 | |
5779 | if (mas_next_setup(mas, max, entry: &entry)) |
5780 | return entry; |
5781 | |
5782 | /* Retries on dead nodes handled by mas_next_slot */ |
5783 | return mas_next_slot(mas, max, empty: true); |
5784 | } |
5785 | EXPORT_SYMBOL_GPL(mas_next_range); |
5786 | |
5787 | /** |
5788 | * mt_next() - get the next value in the maple tree |
5789 | * @mt: The maple tree |
5790 | * @index: The start index |
5791 | * @max: The maximum index to check |
5792 | * |
5793 | * Takes RCU read lock internally to protect the search, which does not |
5794 | * protect the returned pointer after dropping RCU read lock. |
5795 | * See also: Documentation/core-api/maple_tree.rst |
5796 | * |
5797 | * Return: The entry higher than @index or %NULL if nothing is found. |
5798 | */ |
5799 | void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) |
5800 | { |
5801 | void *entry = NULL; |
5802 | MA_STATE(mas, mt, index, index); |
5803 | |
5804 | rcu_read_lock(); |
5805 | entry = mas_next(&mas, max); |
5806 | rcu_read_unlock(); |
5807 | return entry; |
5808 | } |
5809 | EXPORT_SYMBOL_GPL(mt_next); |
5810 | |
5811 | static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) |
5812 | { |
5813 | if (unlikely(mas->index <= min)) { |
5814 | mas->status = ma_underflow; |
5815 | return true; |
5816 | } |
5817 | |
5818 | switch (mas->status) { |
5819 | case ma_active: |
5820 | return false; |
5821 | case ma_start: |
5822 | break; |
5823 | case ma_none: |
5824 | fallthrough; |
5825 | case ma_pause: |
5826 | mas->status = ma_start; |
5827 | break; |
5828 | case ma_underflow: |
5829 | /* underflowed before but the min changed */ |
5830 | mas->status = ma_active; |
5831 | break; |
5832 | case ma_overflow: |
5833 | /* User expects mas to be one after where it is */ |
5834 | mas->status = ma_active; |
5835 | *entry = mas_walk(mas); |
5836 | if (*entry) |
5837 | return true; |
5838 | break; |
5839 | case ma_root: |
5840 | break; |
5841 | case ma_error: |
5842 | return true; |
5843 | } |
5844 | |
5845 | if (mas_is_start(mas)) |
5846 | mas_walk(mas); |
5847 | |
5848 | if (unlikely(mas_is_ptr(mas))) { |
5849 | if (!mas->index) { |
5850 | mas->status = ma_none; |
5851 | return true; |
5852 | } |
5853 | mas->index = mas->last = 0; |
5854 | *entry = mas_root(mas); |
5855 | return true; |
5856 | } |
5857 | |
5858 | if (mas_is_none(mas)) { |
5859 | if (mas->index) { |
5860 | /* Walked to out-of-range pointer? */ |
5861 | mas->index = mas->last = 0; |
5862 | mas->status = ma_root; |
5863 | *entry = mas_root(mas); |
5864 | return true; |
5865 | } |
5866 | return true; |
5867 | } |
5868 | |
5869 | return false; |
5870 | } |
5871 | |
5872 | /** |
5873 | * mas_prev() - Get the previous entry |
5874 | * @mas: The maple state |
5875 | * @min: The minimum value to check. |
5876 | * |
5877 | * Must hold rcu_read_lock or the write lock. |
5878 | * Will reset mas to ma_start if the status is ma_none. Will stop on not |
5879 | * searchable nodes. |
5880 | * |
5881 | * Return: the previous value or %NULL. |
5882 | */ |
5883 | void *mas_prev(struct ma_state *mas, unsigned long min) |
5884 | { |
5885 | void *entry = NULL; |
5886 | |
5887 | if (mas_prev_setup(mas, min, entry: &entry)) |
5888 | return entry; |
5889 | |
5890 | return mas_prev_slot(mas, min, empty: false); |
5891 | } |
5892 | EXPORT_SYMBOL_GPL(mas_prev); |
5893 | |
5894 | /** |
5895 | * mas_prev_range() - Advance to the previous range |
5896 | * @mas: The maple state |
5897 | * @min: The minimum value to check. |
5898 | * |
5899 | * Sets @mas->index and @mas->last to the range. |
5900 | * Must hold rcu_read_lock or the write lock. |
5901 | * Will reset mas to ma_start if the node is ma_none. Will stop on not |
5902 | * searchable nodes. |
5903 | * |
5904 | * Return: the previous value or %NULL. |
5905 | */ |
5906 | void *mas_prev_range(struct ma_state *mas, unsigned long min) |
5907 | { |
5908 | void *entry = NULL; |
5909 | |
5910 | if (mas_prev_setup(mas, min, entry: &entry)) |
5911 | return entry; |
5912 | |
5913 | return mas_prev_slot(mas, min, empty: true); |
5914 | } |
5915 | EXPORT_SYMBOL_GPL(mas_prev_range); |
5916 | |
5917 | /** |
5918 | * mt_prev() - get the previous value in the maple tree |
5919 | * @mt: The maple tree |
5920 | * @index: The start index |
5921 | * @min: The minimum index to check |
5922 | * |
5923 | * Takes RCU read lock internally to protect the search, which does not |
5924 | * protect the returned pointer after dropping RCU read lock. |
5925 | * See also: Documentation/core-api/maple_tree.rst |
5926 | * |
5927 | * Return: The entry before @index or %NULL if nothing is found. |
5928 | */ |
5929 | void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) |
5930 | { |
5931 | void *entry = NULL; |
5932 | MA_STATE(mas, mt, index, index); |
5933 | |
5934 | rcu_read_lock(); |
5935 | entry = mas_prev(&mas, min); |
5936 | rcu_read_unlock(); |
5937 | return entry; |
5938 | } |
5939 | EXPORT_SYMBOL_GPL(mt_prev); |
5940 | |
5941 | /** |
5942 | * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. |
5943 | * @mas: The maple state to pause |
5944 | * |
5945 | * Some users need to pause a walk and drop the lock they're holding in |
5946 | * order to yield to a higher priority thread or carry out an operation |
5947 | * on an entry. Those users should call this function before they drop |
5948 | * the lock. It resets the @mas to be suitable for the next iteration |
5949 | * of the loop after the user has reacquired the lock. If most entries |
5950 | * found during a walk require you to call mas_pause(), the mt_for_each() |
5951 | * iterator may be more appropriate. |
5952 | * |
5953 | */ |
5954 | void mas_pause(struct ma_state *mas) |
5955 | { |
5956 | mas->status = ma_pause; |
5957 | mas->node = NULL; |
5958 | } |
5959 | EXPORT_SYMBOL_GPL(mas_pause); |
5960 | |
5961 | /** |
5962 | * mas_find_setup() - Internal function to set up mas_find*(). |
5963 | * @mas: The maple state |
5964 | * @max: The maximum index |
5965 | * @entry: Pointer to the entry |
5966 | * |
5967 | * Returns: True if entry is the answer, false otherwise. |
5968 | */ |
5969 | static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry) |
5970 | { |
5971 | switch (mas->status) { |
5972 | case ma_active: |
5973 | if (mas->last < max) |
5974 | return false; |
5975 | return true; |
5976 | case ma_start: |
5977 | break; |
5978 | case ma_pause: |
5979 | if (unlikely(mas->last >= max)) |
5980 | return true; |
5981 | |
5982 | mas->index = ++mas->last; |
5983 | mas->status = ma_start; |
5984 | break; |
5985 | case ma_none: |
5986 | if (unlikely(mas->last >= max)) |
5987 | return true; |
5988 | |
5989 | mas->index = mas->last; |
5990 | mas->status = ma_start; |
5991 | break; |
5992 | case ma_underflow: |
5993 | /* mas is pointing at entry before unable to go lower */ |
5994 | if (unlikely(mas->index >= max)) { |
5995 | mas->status = ma_overflow; |
5996 | return true; |
5997 | } |
5998 | |
5999 | mas->status = ma_active; |
6000 | *entry = mas_walk(mas); |
6001 | if (*entry) |
6002 | return true; |
6003 | break; |
6004 | case ma_overflow: |
6005 | if (unlikely(mas->last >= max)) |
6006 | return true; |
6007 | |
6008 | mas->status = ma_active; |
6009 | *entry = mas_walk(mas); |
6010 | if (*entry) |
6011 | return true; |
6012 | break; |
6013 | case ma_root: |
6014 | break; |
6015 | case ma_error: |
6016 | return true; |
6017 | } |
6018 | |
6019 | if (mas_is_start(mas)) { |
6020 | /* First run or continue */ |
6021 | if (mas->index > max) |
6022 | return true; |
6023 | |
6024 | *entry = mas_walk(mas); |
6025 | if (*entry) |
6026 | return true; |
6027 | |
6028 | } |
6029 | |
6030 | if (unlikely(mas_is_ptr(mas))) |
6031 | goto ptr_out_of_range; |
6032 | |
6033 | if (unlikely(mas_is_none(mas))) |
6034 | return true; |
6035 | |
6036 | if (mas->index == max) |
6037 | return true; |
6038 | |
6039 | return false; |
6040 | |
6041 | ptr_out_of_range: |
6042 | mas->status = ma_none; |
6043 | mas->index = 1; |
6044 | mas->last = ULONG_MAX; |
6045 | return true; |
6046 | } |
6047 | |
6048 | /** |
6049 | * mas_find() - On the first call, find the entry at or after mas->index up to |
6050 | * %max. Otherwise, find the entry after mas->index. |
6051 | * @mas: The maple state |
6052 | * @max: The maximum value to check. |
6053 | * |
6054 | * Must hold rcu_read_lock or the write lock. |
6055 | * If an entry exists, last and index are updated accordingly. |
6056 | * May set @mas->status to ma_overflow. |
6057 | * |
6058 | * Return: The entry or %NULL. |
6059 | */ |
6060 | void *mas_find(struct ma_state *mas, unsigned long max) |
6061 | { |
6062 | void *entry = NULL; |
6063 | |
6064 | if (mas_find_setup(mas, max, entry: &entry)) |
6065 | return entry; |
6066 | |
6067 | /* Retries on dead nodes handled by mas_next_slot */ |
6068 | entry = mas_next_slot(mas, max, empty: false); |
6069 | /* Ignore overflow */ |
6070 | mas->status = ma_active; |
6071 | return entry; |
6072 | } |
6073 | EXPORT_SYMBOL_GPL(mas_find); |
6074 | |
6075 | /** |
6076 | * mas_find_range() - On the first call, find the entry at or after |
6077 | * mas->index up to %max. Otherwise, advance to the next slot mas->index. |
6078 | * @mas: The maple state |
6079 | * @max: The maximum value to check. |
6080 | * |
6081 | * Must hold rcu_read_lock or the write lock. |
6082 | * If an entry exists, last and index are updated accordingly. |
6083 | * May set @mas->status to ma_overflow. |
6084 | * |
6085 | * Return: The entry or %NULL. |
6086 | */ |
6087 | void *mas_find_range(struct ma_state *mas, unsigned long max) |
6088 | { |
6089 | void *entry = NULL; |
6090 | |
6091 | if (mas_find_setup(mas, max, entry: &entry)) |
6092 | return entry; |
6093 | |
6094 | /* Retries on dead nodes handled by mas_next_slot */ |
6095 | return mas_next_slot(mas, max, empty: true); |
6096 | } |
6097 | EXPORT_SYMBOL_GPL(mas_find_range); |
6098 | |
6099 | /** |
6100 | * mas_find_rev_setup() - Internal function to set up mas_find_*_rev() |
6101 | * @mas: The maple state |
6102 | * @min: The minimum index |
6103 | * @entry: Pointer to the entry |
6104 | * |
6105 | * Returns: True if entry is the answer, false otherwise. |
6106 | */ |
6107 | static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, |
6108 | void **entry) |
6109 | { |
6110 | |
6111 | switch (mas->status) { |
6112 | case ma_active: |
6113 | goto active; |
6114 | case ma_start: |
6115 | break; |
6116 | case ma_pause: |
6117 | if (unlikely(mas->index <= min)) { |
6118 | mas->status = ma_underflow; |
6119 | return true; |
6120 | } |
6121 | mas->last = --mas->index; |
6122 | mas->status = ma_start; |
6123 | break; |
6124 | case ma_none: |
6125 | if (mas->index <= min) |
6126 | goto none; |
6127 | |
6128 | mas->last = mas->index; |
6129 | mas->status = ma_start; |
6130 | break; |
6131 | case ma_overflow: /* user expects the mas to be one after where it is */ |
6132 | if (unlikely(mas->index <= min)) { |
6133 | mas->status = ma_underflow; |
6134 | return true; |
6135 | } |
6136 | |
6137 | mas->status = ma_active; |
6138 | break; |
6139 | case ma_underflow: /* user expects the mas to be one before where it is */ |
6140 | if (unlikely(mas->index <= min)) |
6141 | return true; |
6142 | |
6143 | mas->status = ma_active; |
6144 | break; |
6145 | case ma_root: |
6146 | break; |
6147 | case ma_error: |
6148 | return true; |
6149 | } |
6150 | |
6151 | if (mas_is_start(mas)) { |
6152 | /* First run or continue */ |
6153 | if (mas->index < min) |
6154 | return true; |
6155 | |
6156 | *entry = mas_walk(mas); |
6157 | if (*entry) |
6158 | return true; |
6159 | } |
6160 | |
6161 | if (unlikely(mas_is_ptr(mas))) |
6162 | goto none; |
6163 | |
6164 | if (unlikely(mas_is_none(mas))) { |
6165 | /* |
6166 | * Walked to the location, and there was nothing so the previous |
6167 | * location is 0. |
6168 | */ |
6169 | mas->last = mas->index = 0; |
6170 | mas->status = ma_root; |
6171 | *entry = mas_root(mas); |
6172 | return true; |
6173 | } |
6174 | |
6175 | active: |
6176 | if (mas->index < min) |
6177 | return true; |
6178 | |
6179 | return false; |
6180 | |
6181 | none: |
6182 | mas->status = ma_none; |
6183 | return true; |
6184 | } |
6185 | |
6186 | /** |
6187 | * mas_find_rev: On the first call, find the first non-null entry at or below |
6188 | * mas->index down to %min. Otherwise find the first non-null entry below |
6189 | * mas->index down to %min. |
6190 | * @mas: The maple state |
6191 | * @min: The minimum value to check. |
6192 | * |
6193 | * Must hold rcu_read_lock or the write lock. |
6194 | * If an entry exists, last and index are updated accordingly. |
6195 | * May set @mas->status to ma_underflow. |
6196 | * |
6197 | * Return: The entry or %NULL. |
6198 | */ |
6199 | void *mas_find_rev(struct ma_state *mas, unsigned long min) |
6200 | { |
6201 | void *entry = NULL; |
6202 | |
6203 | if (mas_find_rev_setup(mas, min, entry: &entry)) |
6204 | return entry; |
6205 | |
6206 | /* Retries on dead nodes handled by mas_prev_slot */ |
6207 | return mas_prev_slot(mas, min, empty: false); |
6208 | |
6209 | } |
6210 | EXPORT_SYMBOL_GPL(mas_find_rev); |
6211 | |
6212 | /** |
6213 | * mas_find_range_rev: On the first call, find the first non-null entry at or |
6214 | * below mas->index down to %min. Otherwise advance to the previous slot after |
6215 | * mas->index down to %min. |
6216 | * @mas: The maple state |
6217 | * @min: The minimum value to check. |
6218 | * |
6219 | * Must hold rcu_read_lock or the write lock. |
6220 | * If an entry exists, last and index are updated accordingly. |
6221 | * May set @mas->status to ma_underflow. |
6222 | * |
6223 | * Return: The entry or %NULL. |
6224 | */ |
6225 | void *mas_find_range_rev(struct ma_state *mas, unsigned long min) |
6226 | { |
6227 | void *entry = NULL; |
6228 | |
6229 | if (mas_find_rev_setup(mas, min, entry: &entry)) |
6230 | return entry; |
6231 | |
6232 | /* Retries on dead nodes handled by mas_prev_slot */ |
6233 | return mas_prev_slot(mas, min, empty: true); |
6234 | } |
6235 | EXPORT_SYMBOL_GPL(mas_find_range_rev); |
6236 | |
6237 | /** |
6238 | * mas_erase() - Find the range in which index resides and erase the entire |
6239 | * range. |
6240 | * @mas: The maple state |
6241 | * |
6242 | * Must hold the write lock. |
6243 | * Searches for @mas->index, sets @mas->index and @mas->last to the range and |
6244 | * erases that range. |
6245 | * |
6246 | * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated. |
6247 | */ |
6248 | void *mas_erase(struct ma_state *mas) |
6249 | { |
6250 | void *entry; |
6251 | MA_WR_STATE(wr_mas, mas, NULL); |
6252 | |
6253 | if (!mas_is_active(mas) || !mas_is_start(mas)) |
6254 | mas->status = ma_start; |
6255 | |
6256 | /* Retry unnecessary when holding the write lock. */ |
6257 | entry = mas_state_walk(mas); |
6258 | if (!entry) |
6259 | return NULL; |
6260 | |
6261 | write_retry: |
6262 | /* Must reset to ensure spanning writes of last slot are detected */ |
6263 | mas_reset(mas); |
6264 | mas_wr_store_setup(wr_mas: &wr_mas); |
6265 | mas_wr_store_entry(wr_mas: &wr_mas); |
6266 | if (mas_nomem(mas, GFP_KERNEL)) |
6267 | goto write_retry; |
6268 | |
6269 | return entry; |
6270 | } |
6271 | EXPORT_SYMBOL_GPL(mas_erase); |
6272 | |
6273 | /** |
6274 | * mas_nomem() - Check if there was an error allocating and do the allocation |
6275 | * if necessary If there are allocations, then free them. |
6276 | * @mas: The maple state |
6277 | * @gfp: The GFP_FLAGS to use for allocations |
6278 | * Return: true on allocation, false otherwise. |
6279 | */ |
6280 | bool mas_nomem(struct ma_state *mas, gfp_t gfp) |
6281 | __must_hold(mas->tree->ma_lock) |
6282 | { |
6283 | if (likely(mas->node != MA_ERROR(-ENOMEM))) { |
6284 | mas_destroy(mas); |
6285 | return false; |
6286 | } |
6287 | |
6288 | if (gfpflags_allow_blocking(gfp_flags: gfp) && !mt_external_lock(mt: mas->tree)) { |
6289 | mtree_unlock(mas->tree); |
6290 | mas_alloc_nodes(mas, gfp); |
6291 | mtree_lock(mas->tree); |
6292 | } else { |
6293 | mas_alloc_nodes(mas, gfp); |
6294 | } |
6295 | |
6296 | if (!mas_allocated(mas)) |
6297 | return false; |
6298 | |
6299 | mas->status = ma_start; |
6300 | return true; |
6301 | } |
6302 | |
6303 | void __init maple_tree_init(void) |
6304 | { |
6305 | maple_node_cache = kmem_cache_create(name: "maple_node" , |
6306 | size: sizeof(struct maple_node), align: sizeof(struct maple_node), |
6307 | SLAB_PANIC, NULL); |
6308 | } |
6309 | |
6310 | /** |
6311 | * mtree_load() - Load a value stored in a maple tree |
6312 | * @mt: The maple tree |
6313 | * @index: The index to load |
6314 | * |
6315 | * Return: the entry or %NULL |
6316 | */ |
6317 | void *mtree_load(struct maple_tree *mt, unsigned long index) |
6318 | { |
6319 | MA_STATE(mas, mt, index, index); |
6320 | void *entry; |
6321 | |
6322 | trace_ma_read(fn: __func__, mas: &mas); |
6323 | rcu_read_lock(); |
6324 | retry: |
6325 | entry = mas_start(mas: &mas); |
6326 | if (unlikely(mas_is_none(&mas))) |
6327 | goto unlock; |
6328 | |
6329 | if (unlikely(mas_is_ptr(&mas))) { |
6330 | if (index) |
6331 | entry = NULL; |
6332 | |
6333 | goto unlock; |
6334 | } |
6335 | |
6336 | entry = mtree_lookup_walk(mas: &mas); |
6337 | if (!entry && unlikely(mas_is_start(&mas))) |
6338 | goto retry; |
6339 | unlock: |
6340 | rcu_read_unlock(); |
6341 | if (xa_is_zero(entry)) |
6342 | return NULL; |
6343 | |
6344 | return entry; |
6345 | } |
6346 | EXPORT_SYMBOL(mtree_load); |
6347 | |
6348 | /** |
6349 | * mtree_store_range() - Store an entry at a given range. |
6350 | * @mt: The maple tree |
6351 | * @index: The start of the range |
6352 | * @last: The end of the range |
6353 | * @entry: The entry to store |
6354 | * @gfp: The GFP_FLAGS to use for allocations |
6355 | * |
6356 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not |
6357 | * be allocated. |
6358 | */ |
6359 | int mtree_store_range(struct maple_tree *mt, unsigned long index, |
6360 | unsigned long last, void *entry, gfp_t gfp) |
6361 | { |
6362 | MA_STATE(mas, mt, index, last); |
6363 | MA_WR_STATE(wr_mas, &mas, entry); |
6364 | |
6365 | trace_ma_write(fn: __func__, mas: &mas, piv: 0, val: entry); |
6366 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
6367 | return -EINVAL; |
6368 | |
6369 | if (index > last) |
6370 | return -EINVAL; |
6371 | |
6372 | mtree_lock(mt); |
6373 | retry: |
6374 | mas_wr_store_entry(wr_mas: &wr_mas); |
6375 | if (mas_nomem(mas: &mas, gfp)) |
6376 | goto retry; |
6377 | |
6378 | mtree_unlock(mt); |
6379 | if (mas_is_err(mas: &mas)) |
6380 | return xa_err(entry: mas.node); |
6381 | |
6382 | return 0; |
6383 | } |
6384 | EXPORT_SYMBOL(mtree_store_range); |
6385 | |
6386 | /** |
6387 | * mtree_store() - Store an entry at a given index. |
6388 | * @mt: The maple tree |
6389 | * @index: The index to store the value |
6390 | * @entry: The entry to store |
6391 | * @gfp: The GFP_FLAGS to use for allocations |
6392 | * |
6393 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not |
6394 | * be allocated. |
6395 | */ |
6396 | int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, |
6397 | gfp_t gfp) |
6398 | { |
6399 | return mtree_store_range(mt, index, index, entry, gfp); |
6400 | } |
6401 | EXPORT_SYMBOL(mtree_store); |
6402 | |
6403 | /** |
6404 | * mtree_insert_range() - Insert an entry at a given range if there is no value. |
6405 | * @mt: The maple tree |
6406 | * @first: The start of the range |
6407 | * @last: The end of the range |
6408 | * @entry: The entry to store |
6409 | * @gfp: The GFP_FLAGS to use for allocations. |
6410 | * |
6411 | * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid |
6412 | * request, -ENOMEM if memory could not be allocated. |
6413 | */ |
6414 | int mtree_insert_range(struct maple_tree *mt, unsigned long first, |
6415 | unsigned long last, void *entry, gfp_t gfp) |
6416 | { |
6417 | MA_STATE(ms, mt, first, last); |
6418 | |
6419 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
6420 | return -EINVAL; |
6421 | |
6422 | if (first > last) |
6423 | return -EINVAL; |
6424 | |
6425 | mtree_lock(mt); |
6426 | retry: |
6427 | mas_insert(mas: &ms, entry); |
6428 | if (mas_nomem(mas: &ms, gfp)) |
6429 | goto retry; |
6430 | |
6431 | mtree_unlock(mt); |
6432 | if (mas_is_err(mas: &ms)) |
6433 | return xa_err(entry: ms.node); |
6434 | |
6435 | return 0; |
6436 | } |
6437 | EXPORT_SYMBOL(mtree_insert_range); |
6438 | |
6439 | /** |
6440 | * mtree_insert() - Insert an entry at a given index if there is no value. |
6441 | * @mt: The maple tree |
6442 | * @index : The index to store the value |
6443 | * @entry: The entry to store |
6444 | * @gfp: The GFP_FLAGS to use for allocations. |
6445 | * |
6446 | * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid |
6447 | * request, -ENOMEM if memory could not be allocated. |
6448 | */ |
6449 | int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, |
6450 | gfp_t gfp) |
6451 | { |
6452 | return mtree_insert_range(mt, index, index, entry, gfp); |
6453 | } |
6454 | EXPORT_SYMBOL(mtree_insert); |
6455 | |
6456 | int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, |
6457 | void *entry, unsigned long size, unsigned long min, |
6458 | unsigned long max, gfp_t gfp) |
6459 | { |
6460 | int ret = 0; |
6461 | |
6462 | MA_STATE(mas, mt, 0, 0); |
6463 | if (!mt_is_alloc(mt)) |
6464 | return -EINVAL; |
6465 | |
6466 | if (WARN_ON_ONCE(mt_is_reserved(entry))) |
6467 | return -EINVAL; |
6468 | |
6469 | mtree_lock(mt); |
6470 | retry: |
6471 | ret = mas_empty_area(&mas, min, max, size); |
6472 | if (ret) |
6473 | goto unlock; |
6474 | |
6475 | mas_insert(mas: &mas, entry); |
6476 | /* |
6477 | * mas_nomem() may release the lock, causing the allocated area |
6478 | * to be unavailable, so try to allocate a free area again. |
6479 | */ |
6480 | if (mas_nomem(mas: &mas, gfp)) |
6481 | goto retry; |
6482 | |
6483 | if (mas_is_err(mas: &mas)) |
6484 | ret = xa_err(entry: mas.node); |
6485 | else |
6486 | *startp = mas.index; |
6487 | |
6488 | unlock: |
6489 | mtree_unlock(mt); |
6490 | return ret; |
6491 | } |
6492 | EXPORT_SYMBOL(mtree_alloc_range); |
6493 | |
6494 | /** |
6495 | * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree. |
6496 | * @mt: The maple tree. |
6497 | * @startp: Pointer to ID. |
6498 | * @range_lo: Lower bound of range to search. |
6499 | * @range_hi: Upper bound of range to search. |
6500 | * @entry: The entry to store. |
6501 | * @next: Pointer to next ID to allocate. |
6502 | * @gfp: The GFP_FLAGS to use for allocations. |
6503 | * |
6504 | * Finds an empty entry in @mt after @next, stores the new index into |
6505 | * the @id pointer, stores the entry at that index, then updates @next. |
6506 | * |
6507 | * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag. |
6508 | * |
6509 | * Context: Any context. Takes and releases the mt.lock. May sleep if |
6510 | * the @gfp flags permit. |
6511 | * |
6512 | * Return: 0 if the allocation succeeded without wrapping, 1 if the |
6513 | * allocation succeeded after wrapping, -ENOMEM if memory could not be |
6514 | * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no |
6515 | * free entries. |
6516 | */ |
6517 | int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp, |
6518 | void *entry, unsigned long range_lo, unsigned long range_hi, |
6519 | unsigned long *next, gfp_t gfp) |
6520 | { |
6521 | int ret; |
6522 | |
6523 | MA_STATE(mas, mt, 0, 0); |
6524 | |
6525 | if (!mt_is_alloc(mt)) |
6526 | return -EINVAL; |
6527 | if (WARN_ON_ONCE(mt_is_reserved(entry))) |
6528 | return -EINVAL; |
6529 | mtree_lock(mt); |
6530 | ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi, |
6531 | next, gfp); |
6532 | mtree_unlock(mt); |
6533 | return ret; |
6534 | } |
6535 | EXPORT_SYMBOL(mtree_alloc_cyclic); |
6536 | |
6537 | int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, |
6538 | void *entry, unsigned long size, unsigned long min, |
6539 | unsigned long max, gfp_t gfp) |
6540 | { |
6541 | int ret = 0; |
6542 | |
6543 | MA_STATE(mas, mt, 0, 0); |
6544 | if (!mt_is_alloc(mt)) |
6545 | return -EINVAL; |
6546 | |
6547 | if (WARN_ON_ONCE(mt_is_reserved(entry))) |
6548 | return -EINVAL; |
6549 | |
6550 | mtree_lock(mt); |
6551 | retry: |
6552 | ret = mas_empty_area_rev(&mas, min, max, size); |
6553 | if (ret) |
6554 | goto unlock; |
6555 | |
6556 | mas_insert(mas: &mas, entry); |
6557 | /* |
6558 | * mas_nomem() may release the lock, causing the allocated area |
6559 | * to be unavailable, so try to allocate a free area again. |
6560 | */ |
6561 | if (mas_nomem(mas: &mas, gfp)) |
6562 | goto retry; |
6563 | |
6564 | if (mas_is_err(mas: &mas)) |
6565 | ret = xa_err(entry: mas.node); |
6566 | else |
6567 | *startp = mas.index; |
6568 | |
6569 | unlock: |
6570 | mtree_unlock(mt); |
6571 | return ret; |
6572 | } |
6573 | EXPORT_SYMBOL(mtree_alloc_rrange); |
6574 | |
6575 | /** |
6576 | * mtree_erase() - Find an index and erase the entire range. |
6577 | * @mt: The maple tree |
6578 | * @index: The index to erase |
6579 | * |
6580 | * Erasing is the same as a walk to an entry then a store of a NULL to that |
6581 | * ENTIRE range. In fact, it is implemented as such using the advanced API. |
6582 | * |
6583 | * Return: The entry stored at the @index or %NULL |
6584 | */ |
6585 | void *mtree_erase(struct maple_tree *mt, unsigned long index) |
6586 | { |
6587 | void *entry = NULL; |
6588 | |
6589 | MA_STATE(mas, mt, index, index); |
6590 | trace_ma_op(fn: __func__, mas: &mas); |
6591 | |
6592 | mtree_lock(mt); |
6593 | entry = mas_erase(&mas); |
6594 | mtree_unlock(mt); |
6595 | |
6596 | return entry; |
6597 | } |
6598 | EXPORT_SYMBOL(mtree_erase); |
6599 | |
6600 | /* |
6601 | * mas_dup_free() - Free an incomplete duplication of a tree. |
6602 | * @mas: The maple state of a incomplete tree. |
6603 | * |
6604 | * The parameter @mas->node passed in indicates that the allocation failed on |
6605 | * this node. This function frees all nodes starting from @mas->node in the |
6606 | * reverse order of mas_dup_build(). There is no need to hold the source tree |
6607 | * lock at this time. |
6608 | */ |
6609 | static void mas_dup_free(struct ma_state *mas) |
6610 | { |
6611 | struct maple_node *node; |
6612 | enum maple_type type; |
6613 | void __rcu **slots; |
6614 | unsigned char count, i; |
6615 | |
6616 | /* Maybe the first node allocation failed. */ |
6617 | if (mas_is_none(mas)) |
6618 | return; |
6619 | |
6620 | while (!mte_is_root(node: mas->node)) { |
6621 | mas_ascend(mas); |
6622 | if (mas->offset) { |
6623 | mas->offset--; |
6624 | do { |
6625 | mas_descend(mas); |
6626 | mas->offset = mas_data_end(mas); |
6627 | } while (!mte_is_leaf(entry: mas->node)); |
6628 | |
6629 | mas_ascend(mas); |
6630 | } |
6631 | |
6632 | node = mte_to_node(entry: mas->node); |
6633 | type = mte_node_type(entry: mas->node); |
6634 | slots = ma_slots(mn: node, mt: type); |
6635 | count = mas_data_end(mas) + 1; |
6636 | for (i = 0; i < count; i++) |
6637 | ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK; |
6638 | mt_free_bulk(size: count, nodes: slots); |
6639 | } |
6640 | |
6641 | node = mte_to_node(entry: mas->node); |
6642 | mt_free_one(node); |
6643 | } |
6644 | |
6645 | /* |
6646 | * mas_copy_node() - Copy a maple node and replace the parent. |
6647 | * @mas: The maple state of source tree. |
6648 | * @new_mas: The maple state of new tree. |
6649 | * @parent: The parent of the new node. |
6650 | * |
6651 | * Copy @mas->node to @new_mas->node, set @parent to be the parent of |
6652 | * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM. |
6653 | */ |
6654 | static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas, |
6655 | struct maple_pnode *parent) |
6656 | { |
6657 | struct maple_node *node = mte_to_node(entry: mas->node); |
6658 | struct maple_node *new_node = mte_to_node(entry: new_mas->node); |
6659 | unsigned long val; |
6660 | |
6661 | /* Copy the node completely. */ |
6662 | memcpy(new_node, node, sizeof(struct maple_node)); |
6663 | /* Update the parent node pointer. */ |
6664 | val = (unsigned long)node->parent & MAPLE_NODE_MASK; |
6665 | new_node->parent = ma_parent_ptr(val | (unsigned long)parent); |
6666 | } |
6667 | |
6668 | /* |
6669 | * mas_dup_alloc() - Allocate child nodes for a maple node. |
6670 | * @mas: The maple state of source tree. |
6671 | * @new_mas: The maple state of new tree. |
6672 | * @gfp: The GFP_FLAGS to use for allocations. |
6673 | * |
6674 | * This function allocates child nodes for @new_mas->node during the duplication |
6675 | * process. If memory allocation fails, @mas is set to -ENOMEM. |
6676 | */ |
6677 | static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas, |
6678 | gfp_t gfp) |
6679 | { |
6680 | struct maple_node *node = mte_to_node(entry: mas->node); |
6681 | struct maple_node *new_node = mte_to_node(entry: new_mas->node); |
6682 | enum maple_type type; |
6683 | unsigned char request, count, i; |
6684 | void __rcu **slots; |
6685 | void __rcu **new_slots; |
6686 | unsigned long val; |
6687 | |
6688 | /* Allocate memory for child nodes. */ |
6689 | type = mte_node_type(entry: mas->node); |
6690 | new_slots = ma_slots(mn: new_node, mt: type); |
6691 | request = mas_data_end(mas) + 1; |
6692 | count = mt_alloc_bulk(gfp, size: request, nodes: (void **)new_slots); |
6693 | if (unlikely(count < request)) { |
6694 | memset(new_slots, 0, request * sizeof(void *)); |
6695 | mas_set_err(mas, err: -ENOMEM); |
6696 | return; |
6697 | } |
6698 | |
6699 | /* Restore node type information in slots. */ |
6700 | slots = ma_slots(mn: node, mt: type); |
6701 | for (i = 0; i < count; i++) { |
6702 | val = (unsigned long)mt_slot_locked(mt: mas->tree, slots, offset: i); |
6703 | val &= MAPLE_NODE_MASK; |
6704 | ((unsigned long *)new_slots)[i] |= val; |
6705 | } |
6706 | } |
6707 | |
6708 | /* |
6709 | * mas_dup_build() - Build a new maple tree from a source tree |
6710 | * @mas: The maple state of source tree, need to be in MAS_START state. |
6711 | * @new_mas: The maple state of new tree, need to be in MAS_START state. |
6712 | * @gfp: The GFP_FLAGS to use for allocations. |
6713 | * |
6714 | * This function builds a new tree in DFS preorder. If the memory allocation |
6715 | * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the |
6716 | * last node. mas_dup_free() will free the incomplete duplication of a tree. |
6717 | * |
6718 | * Note that the attributes of the two trees need to be exactly the same, and the |
6719 | * new tree needs to be empty, otherwise -EINVAL will be set in @mas. |
6720 | */ |
6721 | static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas, |
6722 | gfp_t gfp) |
6723 | { |
6724 | struct maple_node *node; |
6725 | struct maple_pnode *parent = NULL; |
6726 | struct maple_enode *root; |
6727 | enum maple_type type; |
6728 | |
6729 | if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) || |
6730 | unlikely(!mtree_empty(new_mas->tree))) { |
6731 | mas_set_err(mas, err: -EINVAL); |
6732 | return; |
6733 | } |
6734 | |
6735 | root = mas_start(mas); |
6736 | if (mas_is_ptr(mas) || mas_is_none(mas)) |
6737 | goto set_new_tree; |
6738 | |
6739 | node = mt_alloc_one(gfp); |
6740 | if (!node) { |
6741 | new_mas->status = ma_none; |
6742 | mas_set_err(mas, err: -ENOMEM); |
6743 | return; |
6744 | } |
6745 | |
6746 | type = mte_node_type(entry: mas->node); |
6747 | root = mt_mk_node(node, type); |
6748 | new_mas->node = root; |
6749 | new_mas->min = 0; |
6750 | new_mas->max = ULONG_MAX; |
6751 | root = mte_mk_root(node: root); |
6752 | while (1) { |
6753 | mas_copy_node(mas, new_mas, parent); |
6754 | if (!mte_is_leaf(entry: mas->node)) { |
6755 | /* Only allocate child nodes for non-leaf nodes. */ |
6756 | mas_dup_alloc(mas, new_mas, gfp); |
6757 | if (unlikely(mas_is_err(mas))) |
6758 | return; |
6759 | } else { |
6760 | /* |
6761 | * This is the last leaf node and duplication is |
6762 | * completed. |
6763 | */ |
6764 | if (mas->max == ULONG_MAX) |
6765 | goto done; |
6766 | |
6767 | /* This is not the last leaf node and needs to go up. */ |
6768 | do { |
6769 | mas_ascend(mas); |
6770 | mas_ascend(mas: new_mas); |
6771 | } while (mas->offset == mas_data_end(mas)); |
6772 | |
6773 | /* Move to the next subtree. */ |
6774 | mas->offset++; |
6775 | new_mas->offset++; |
6776 | } |
6777 | |
6778 | mas_descend(mas); |
6779 | parent = ma_parent_ptr(mte_to_node(new_mas->node)); |
6780 | mas_descend(mas: new_mas); |
6781 | mas->offset = 0; |
6782 | new_mas->offset = 0; |
6783 | } |
6784 | done: |
6785 | /* Specially handle the parent of the root node. */ |
6786 | mte_to_node(entry: root)->parent = ma_parent_ptr(mas_tree_parent(new_mas)); |
6787 | set_new_tree: |
6788 | /* Make them the same height */ |
6789 | new_mas->tree->ma_flags = mas->tree->ma_flags; |
6790 | rcu_assign_pointer(new_mas->tree->ma_root, root); |
6791 | } |
6792 | |
6793 | /** |
6794 | * __mt_dup(): Duplicate an entire maple tree |
6795 | * @mt: The source maple tree |
6796 | * @new: The new maple tree |
6797 | * @gfp: The GFP_FLAGS to use for allocations |
6798 | * |
6799 | * This function duplicates a maple tree in Depth-First Search (DFS) pre-order |
6800 | * traversal. It uses memcpy() to copy nodes in the source tree and allocate |
6801 | * new child nodes in non-leaf nodes. The new node is exactly the same as the |
6802 | * source node except for all the addresses stored in it. It will be faster than |
6803 | * traversing all elements in the source tree and inserting them one by one into |
6804 | * the new tree. |
6805 | * The user needs to ensure that the attributes of the source tree and the new |
6806 | * tree are the same, and the new tree needs to be an empty tree, otherwise |
6807 | * -EINVAL will be returned. |
6808 | * Note that the user needs to manually lock the source tree and the new tree. |
6809 | * |
6810 | * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If |
6811 | * the attributes of the two trees are different or the new tree is not an empty |
6812 | * tree. |
6813 | */ |
6814 | int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) |
6815 | { |
6816 | int ret = 0; |
6817 | MA_STATE(mas, mt, 0, 0); |
6818 | MA_STATE(new_mas, new, 0, 0); |
6819 | |
6820 | mas_dup_build(mas: &mas, new_mas: &new_mas, gfp); |
6821 | if (unlikely(mas_is_err(&mas))) { |
6822 | ret = xa_err(entry: mas.node); |
6823 | if (ret == -ENOMEM) |
6824 | mas_dup_free(mas: &new_mas); |
6825 | } |
6826 | |
6827 | return ret; |
6828 | } |
6829 | EXPORT_SYMBOL(__mt_dup); |
6830 | |
6831 | /** |
6832 | * mtree_dup(): Duplicate an entire maple tree |
6833 | * @mt: The source maple tree |
6834 | * @new: The new maple tree |
6835 | * @gfp: The GFP_FLAGS to use for allocations |
6836 | * |
6837 | * This function duplicates a maple tree in Depth-First Search (DFS) pre-order |
6838 | * traversal. It uses memcpy() to copy nodes in the source tree and allocate |
6839 | * new child nodes in non-leaf nodes. The new node is exactly the same as the |
6840 | * source node except for all the addresses stored in it. It will be faster than |
6841 | * traversing all elements in the source tree and inserting them one by one into |
6842 | * the new tree. |
6843 | * The user needs to ensure that the attributes of the source tree and the new |
6844 | * tree are the same, and the new tree needs to be an empty tree, otherwise |
6845 | * -EINVAL will be returned. |
6846 | * |
6847 | * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If |
6848 | * the attributes of the two trees are different or the new tree is not an empty |
6849 | * tree. |
6850 | */ |
6851 | int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) |
6852 | { |
6853 | int ret = 0; |
6854 | MA_STATE(mas, mt, 0, 0); |
6855 | MA_STATE(new_mas, new, 0, 0); |
6856 | |
6857 | mas_lock(&new_mas); |
6858 | mas_lock_nested(&mas, SINGLE_DEPTH_NESTING); |
6859 | mas_dup_build(mas: &mas, new_mas: &new_mas, gfp); |
6860 | mas_unlock(&mas); |
6861 | if (unlikely(mas_is_err(&mas))) { |
6862 | ret = xa_err(entry: mas.node); |
6863 | if (ret == -ENOMEM) |
6864 | mas_dup_free(mas: &new_mas); |
6865 | } |
6866 | |
6867 | mas_unlock(&new_mas); |
6868 | return ret; |
6869 | } |
6870 | EXPORT_SYMBOL(mtree_dup); |
6871 | |
6872 | /** |
6873 | * __mt_destroy() - Walk and free all nodes of a locked maple tree. |
6874 | * @mt: The maple tree |
6875 | * |
6876 | * Note: Does not handle locking. |
6877 | */ |
6878 | void __mt_destroy(struct maple_tree *mt) |
6879 | { |
6880 | void *root = mt_root_locked(mt); |
6881 | |
6882 | rcu_assign_pointer(mt->ma_root, NULL); |
6883 | if (xa_is_node(entry: root)) |
6884 | mte_destroy_walk(enode: root, mt); |
6885 | |
6886 | mt->ma_flags = mt_attr(mt); |
6887 | } |
6888 | EXPORT_SYMBOL_GPL(__mt_destroy); |
6889 | |
6890 | /** |
6891 | * mtree_destroy() - Destroy a maple tree |
6892 | * @mt: The maple tree |
6893 | * |
6894 | * Frees all resources used by the tree. Handles locking. |
6895 | */ |
6896 | void mtree_destroy(struct maple_tree *mt) |
6897 | { |
6898 | mtree_lock(mt); |
6899 | __mt_destroy(mt); |
6900 | mtree_unlock(mt); |
6901 | } |
6902 | EXPORT_SYMBOL(mtree_destroy); |
6903 | |
6904 | /** |
6905 | * mt_find() - Search from the start up until an entry is found. |
6906 | * @mt: The maple tree |
6907 | * @index: Pointer which contains the start location of the search |
6908 | * @max: The maximum value of the search range |
6909 | * |
6910 | * Takes RCU read lock internally to protect the search, which does not |
6911 | * protect the returned pointer after dropping RCU read lock. |
6912 | * See also: Documentation/core-api/maple_tree.rst |
6913 | * |
6914 | * In case that an entry is found @index is updated to point to the next |
6915 | * possible entry independent whether the found entry is occupying a |
6916 | * single index or a range if indices. |
6917 | * |
6918 | * Return: The entry at or after the @index or %NULL |
6919 | */ |
6920 | void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) |
6921 | { |
6922 | MA_STATE(mas, mt, *index, *index); |
6923 | void *entry; |
6924 | #ifdef CONFIG_DEBUG_MAPLE_TREE |
6925 | unsigned long copy = *index; |
6926 | #endif |
6927 | |
6928 | trace_ma_read(fn: __func__, mas: &mas); |
6929 | |
6930 | if ((*index) > max) |
6931 | return NULL; |
6932 | |
6933 | rcu_read_lock(); |
6934 | retry: |
6935 | entry = mas_state_walk(mas: &mas); |
6936 | if (mas_is_start(mas: &mas)) |
6937 | goto retry; |
6938 | |
6939 | if (unlikely(xa_is_zero(entry))) |
6940 | entry = NULL; |
6941 | |
6942 | if (entry) |
6943 | goto unlock; |
6944 | |
6945 | while (mas_is_active(mas: &mas) && (mas.last < max)) { |
6946 | entry = mas_next_entry(mas: &mas, limit: max); |
6947 | if (likely(entry && !xa_is_zero(entry))) |
6948 | break; |
6949 | } |
6950 | |
6951 | if (unlikely(xa_is_zero(entry))) |
6952 | entry = NULL; |
6953 | unlock: |
6954 | rcu_read_unlock(); |
6955 | if (likely(entry)) { |
6956 | *index = mas.last + 1; |
6957 | #ifdef CONFIG_DEBUG_MAPLE_TREE |
6958 | if (MT_WARN_ON(mt, (*index) && ((*index) <= copy))) |
6959 | pr_err("index not increased! %lx <= %lx\n" , |
6960 | *index, copy); |
6961 | #endif |
6962 | } |
6963 | |
6964 | return entry; |
6965 | } |
6966 | EXPORT_SYMBOL(mt_find); |
6967 | |
6968 | /** |
6969 | * mt_find_after() - Search from the start up until an entry is found. |
6970 | * @mt: The maple tree |
6971 | * @index: Pointer which contains the start location of the search |
6972 | * @max: The maximum value to check |
6973 | * |
6974 | * Same as mt_find() except that it checks @index for 0 before |
6975 | * searching. If @index == 0, the search is aborted. This covers a wrap |
6976 | * around of @index to 0 in an iterator loop. |
6977 | * |
6978 | * Return: The entry at or after the @index or %NULL |
6979 | */ |
6980 | void *mt_find_after(struct maple_tree *mt, unsigned long *index, |
6981 | unsigned long max) |
6982 | { |
6983 | if (!(*index)) |
6984 | return NULL; |
6985 | |
6986 | return mt_find(mt, index, max); |
6987 | } |
6988 | EXPORT_SYMBOL(mt_find_after); |
6989 | |
6990 | #ifdef CONFIG_DEBUG_MAPLE_TREE |
6991 | atomic_t maple_tree_tests_run; |
6992 | EXPORT_SYMBOL_GPL(maple_tree_tests_run); |
6993 | atomic_t maple_tree_tests_passed; |
6994 | EXPORT_SYMBOL_GPL(maple_tree_tests_passed); |
6995 | |
6996 | #ifndef __KERNEL__ |
6997 | extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); |
6998 | void mt_set_non_kernel(unsigned int val) |
6999 | { |
7000 | kmem_cache_set_non_kernel(maple_node_cache, val); |
7001 | } |
7002 | |
7003 | extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); |
7004 | unsigned long mt_get_alloc_size(void) |
7005 | { |
7006 | return kmem_cache_get_alloc(maple_node_cache); |
7007 | } |
7008 | |
7009 | extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); |
7010 | void mt_zero_nr_tallocated(void) |
7011 | { |
7012 | kmem_cache_zero_nr_tallocated(maple_node_cache); |
7013 | } |
7014 | |
7015 | extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); |
7016 | unsigned int mt_nr_tallocated(void) |
7017 | { |
7018 | return kmem_cache_nr_tallocated(maple_node_cache); |
7019 | } |
7020 | |
7021 | extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); |
7022 | unsigned int mt_nr_allocated(void) |
7023 | { |
7024 | return kmem_cache_nr_allocated(maple_node_cache); |
7025 | } |
7026 | |
7027 | void mt_cache_shrink(void) |
7028 | { |
7029 | } |
7030 | #else |
7031 | /* |
7032 | * mt_cache_shrink() - For testing, don't use this. |
7033 | * |
7034 | * Certain testcases can trigger an OOM when combined with other memory |
7035 | * debugging configuration options. This function is used to reduce the |
7036 | * possibility of an out of memory even due to kmem_cache objects remaining |
7037 | * around for longer than usual. |
7038 | */ |
7039 | void mt_cache_shrink(void) |
7040 | { |
7041 | kmem_cache_shrink(s: maple_node_cache); |
7042 | |
7043 | } |
7044 | EXPORT_SYMBOL_GPL(mt_cache_shrink); |
7045 | |
7046 | #endif /* not defined __KERNEL__ */ |
7047 | /* |
7048 | * mas_get_slot() - Get the entry in the maple state node stored at @offset. |
7049 | * @mas: The maple state |
7050 | * @offset: The offset into the slot array to fetch. |
7051 | * |
7052 | * Return: The entry stored at @offset. |
7053 | */ |
7054 | static inline struct maple_enode *mas_get_slot(struct ma_state *mas, |
7055 | unsigned char offset) |
7056 | { |
7057 | return mas_slot(mas, slots: ma_slots(mn: mas_mn(mas), mt: mte_node_type(entry: mas->node)), |
7058 | offset); |
7059 | } |
7060 | |
7061 | /* Depth first search, post-order */ |
7062 | static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) |
7063 | { |
7064 | |
7065 | struct maple_enode *p, *mn = mas->node; |
7066 | unsigned long p_min, p_max; |
7067 | |
7068 | mas_next_node(mas, node: mas_mn(mas), max); |
7069 | if (!mas_is_overflow(mas)) |
7070 | return; |
7071 | |
7072 | if (mte_is_root(node: mn)) |
7073 | return; |
7074 | |
7075 | mas->node = mn; |
7076 | mas_ascend(mas); |
7077 | do { |
7078 | p = mas->node; |
7079 | p_min = mas->min; |
7080 | p_max = mas->max; |
7081 | mas_prev_node(mas, min: 0); |
7082 | } while (!mas_is_underflow(mas)); |
7083 | |
7084 | mas->node = p; |
7085 | mas->max = p_max; |
7086 | mas->min = p_min; |
7087 | } |
7088 | |
7089 | /* Tree validations */ |
7090 | static void mt_dump_node(const struct maple_tree *mt, void *entry, |
7091 | unsigned long min, unsigned long max, unsigned int depth, |
7092 | enum mt_dump_format format); |
7093 | static void mt_dump_range(unsigned long min, unsigned long max, |
7094 | unsigned int depth, enum mt_dump_format format) |
7095 | { |
7096 | static const char spaces[] = " " ; |
7097 | |
7098 | switch(format) { |
7099 | case mt_dump_hex: |
7100 | if (min == max) |
7101 | pr_info("%.*s%lx: " , depth * 2, spaces, min); |
7102 | else |
7103 | pr_info("%.*s%lx-%lx: " , depth * 2, spaces, min, max); |
7104 | break; |
7105 | case mt_dump_dec: |
7106 | if (min == max) |
7107 | pr_info("%.*s%lu: " , depth * 2, spaces, min); |
7108 | else |
7109 | pr_info("%.*s%lu-%lu: " , depth * 2, spaces, min, max); |
7110 | } |
7111 | } |
7112 | |
7113 | static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, |
7114 | unsigned int depth, enum mt_dump_format format) |
7115 | { |
7116 | mt_dump_range(min, max, depth, format); |
7117 | |
7118 | if (xa_is_value(entry)) |
7119 | pr_cont("value %ld (0x%lx) [%p]\n" , xa_to_value(entry), |
7120 | xa_to_value(entry), entry); |
7121 | else if (xa_is_zero(entry)) |
7122 | pr_cont("zero (%ld)\n" , xa_to_internal(entry)); |
7123 | else if (mt_is_reserved(entry)) |
7124 | pr_cont("UNKNOWN ENTRY (%p)\n" , entry); |
7125 | else |
7126 | pr_cont("%p\n" , entry); |
7127 | } |
7128 | |
7129 | static void mt_dump_range64(const struct maple_tree *mt, void *entry, |
7130 | unsigned long min, unsigned long max, unsigned int depth, |
7131 | enum mt_dump_format format) |
7132 | { |
7133 | struct maple_range_64 *node = &mte_to_node(entry)->mr64; |
7134 | bool leaf = mte_is_leaf(entry); |
7135 | unsigned long first = min; |
7136 | int i; |
7137 | |
7138 | pr_cont(" contents: " ); |
7139 | for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { |
7140 | switch(format) { |
7141 | case mt_dump_hex: |
7142 | pr_cont("%p %lX " , node->slot[i], node->pivot[i]); |
7143 | break; |
7144 | case mt_dump_dec: |
7145 | pr_cont("%p %lu " , node->slot[i], node->pivot[i]); |
7146 | } |
7147 | } |
7148 | pr_cont("%p\n" , node->slot[i]); |
7149 | for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { |
7150 | unsigned long last = max; |
7151 | |
7152 | if (i < (MAPLE_RANGE64_SLOTS - 1)) |
7153 | last = node->pivot[i]; |
7154 | else if (!node->slot[i] && max != mt_node_max(entry)) |
7155 | break; |
7156 | if (last == 0 && i > 0) |
7157 | break; |
7158 | if (leaf) |
7159 | mt_dump_entry(entry: mt_slot(mt, slots: node->slot, offset: i), |
7160 | min: first, max: last, depth: depth + 1, format); |
7161 | else if (node->slot[i]) |
7162 | mt_dump_node(mt, entry: mt_slot(mt, slots: node->slot, offset: i), |
7163 | min: first, max: last, depth: depth + 1, format); |
7164 | |
7165 | if (last == max) |
7166 | break; |
7167 | if (last > max) { |
7168 | switch(format) { |
7169 | case mt_dump_hex: |
7170 | pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n" , |
7171 | node, last, max, i); |
7172 | break; |
7173 | case mt_dump_dec: |
7174 | pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n" , |
7175 | node, last, max, i); |
7176 | } |
7177 | } |
7178 | first = last + 1; |
7179 | } |
7180 | } |
7181 | |
7182 | static void mt_dump_arange64(const struct maple_tree *mt, void *entry, |
7183 | unsigned long min, unsigned long max, unsigned int depth, |
7184 | enum mt_dump_format format) |
7185 | { |
7186 | struct maple_arange_64 *node = &mte_to_node(entry)->ma64; |
7187 | bool leaf = mte_is_leaf(entry); |
7188 | unsigned long first = min; |
7189 | int i; |
7190 | |
7191 | pr_cont(" contents: " ); |
7192 | for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { |
7193 | switch (format) { |
7194 | case mt_dump_hex: |
7195 | pr_cont("%lx " , node->gap[i]); |
7196 | break; |
7197 | case mt_dump_dec: |
7198 | pr_cont("%lu " , node->gap[i]); |
7199 | } |
7200 | } |
7201 | pr_cont("| %02X %02X| " , node->meta.end, node->meta.gap); |
7202 | for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) { |
7203 | switch (format) { |
7204 | case mt_dump_hex: |
7205 | pr_cont("%p %lX " , node->slot[i], node->pivot[i]); |
7206 | break; |
7207 | case mt_dump_dec: |
7208 | pr_cont("%p %lu " , node->slot[i], node->pivot[i]); |
7209 | } |
7210 | } |
7211 | pr_cont("%p\n" , node->slot[i]); |
7212 | for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { |
7213 | unsigned long last = max; |
7214 | |
7215 | if (i < (MAPLE_ARANGE64_SLOTS - 1)) |
7216 | last = node->pivot[i]; |
7217 | else if (!node->slot[i]) |
7218 | break; |
7219 | if (last == 0 && i > 0) |
7220 | break; |
7221 | if (leaf) |
7222 | mt_dump_entry(entry: mt_slot(mt, slots: node->slot, offset: i), |
7223 | min: first, max: last, depth: depth + 1, format); |
7224 | else if (node->slot[i]) |
7225 | mt_dump_node(mt, entry: mt_slot(mt, slots: node->slot, offset: i), |
7226 | min: first, max: last, depth: depth + 1, format); |
7227 | |
7228 | if (last == max) |
7229 | break; |
7230 | if (last > max) { |
7231 | pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n" , |
7232 | node, last, max, i); |
7233 | break; |
7234 | } |
7235 | first = last + 1; |
7236 | } |
7237 | } |
7238 | |
7239 | static void mt_dump_node(const struct maple_tree *mt, void *entry, |
7240 | unsigned long min, unsigned long max, unsigned int depth, |
7241 | enum mt_dump_format format) |
7242 | { |
7243 | struct maple_node *node = mte_to_node(entry); |
7244 | unsigned int type = mte_node_type(entry); |
7245 | unsigned int i; |
7246 | |
7247 | mt_dump_range(min, max, depth, format); |
7248 | |
7249 | pr_cont("node %p depth %d type %d parent %p" , node, depth, type, |
7250 | node ? node->parent : NULL); |
7251 | switch (type) { |
7252 | case maple_dense: |
7253 | pr_cont("\n" ); |
7254 | for (i = 0; i < MAPLE_NODE_SLOTS; i++) { |
7255 | if (min + i > max) |
7256 | pr_cont("OUT OF RANGE: " ); |
7257 | mt_dump_entry(entry: mt_slot(mt, slots: node->slot, offset: i), |
7258 | min: min + i, max: min + i, depth, format); |
7259 | } |
7260 | break; |
7261 | case maple_leaf_64: |
7262 | case maple_range_64: |
7263 | mt_dump_range64(mt, entry, min, max, depth, format); |
7264 | break; |
7265 | case maple_arange_64: |
7266 | mt_dump_arange64(mt, entry, min, max, depth, format); |
7267 | break; |
7268 | |
7269 | default: |
7270 | pr_cont(" UNKNOWN TYPE\n" ); |
7271 | } |
7272 | } |
7273 | |
7274 | void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) |
7275 | { |
7276 | void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); |
7277 | |
7278 | pr_info("maple_tree(%p) flags %X, height %u root %p\n" , |
7279 | mt, mt->ma_flags, mt_height(mt), entry); |
7280 | if (!xa_is_node(entry)) |
7281 | mt_dump_entry(entry, min: 0, max: 0, depth: 0, format); |
7282 | else if (entry) |
7283 | mt_dump_node(mt, entry, min: 0, mt_node_max(entry), depth: 0, format); |
7284 | } |
7285 | EXPORT_SYMBOL_GPL(mt_dump); |
7286 | |
7287 | /* |
7288 | * Calculate the maximum gap in a node and check if that's what is reported in |
7289 | * the parent (unless root). |
7290 | */ |
7291 | static void mas_validate_gaps(struct ma_state *mas) |
7292 | { |
7293 | struct maple_enode *mte = mas->node; |
7294 | struct maple_node *p_mn, *node = mte_to_node(entry: mte); |
7295 | enum maple_type mt = mte_node_type(entry: mas->node); |
7296 | unsigned long gap = 0, max_gap = 0; |
7297 | unsigned long p_end, p_start = mas->min; |
7298 | unsigned char p_slot, offset; |
7299 | unsigned long *gaps = NULL; |
7300 | unsigned long *pivots = ma_pivots(node, type: mt); |
7301 | unsigned int i; |
7302 | |
7303 | if (ma_is_dense(type: mt)) { |
7304 | for (i = 0; i < mt_slot_count(mte); i++) { |
7305 | if (mas_get_slot(mas, offset: i)) { |
7306 | if (gap > max_gap) |
7307 | max_gap = gap; |
7308 | gap = 0; |
7309 | continue; |
7310 | } |
7311 | gap++; |
7312 | } |
7313 | goto counted; |
7314 | } |
7315 | |
7316 | gaps = ma_gaps(node, type: mt); |
7317 | for (i = 0; i < mt_slot_count(mte); i++) { |
7318 | p_end = mas_safe_pivot(mas, pivots, piv: i, type: mt); |
7319 | |
7320 | if (!gaps) { |
7321 | if (!mas_get_slot(mas, offset: i)) |
7322 | gap = p_end - p_start + 1; |
7323 | } else { |
7324 | void *entry = mas_get_slot(mas, offset: i); |
7325 | |
7326 | gap = gaps[i]; |
7327 | MT_BUG_ON(mas->tree, !entry); |
7328 | |
7329 | if (gap > p_end - p_start + 1) { |
7330 | pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n" , |
7331 | mas_mn(mas), i, gap, p_end, p_start, |
7332 | p_end - p_start + 1); |
7333 | MT_BUG_ON(mas->tree, gap > p_end - p_start + 1); |
7334 | } |
7335 | } |
7336 | |
7337 | if (gap > max_gap) |
7338 | max_gap = gap; |
7339 | |
7340 | p_start = p_end + 1; |
7341 | if (p_end >= mas->max) |
7342 | break; |
7343 | } |
7344 | |
7345 | counted: |
7346 | if (mt == maple_arange_64) { |
7347 | MT_BUG_ON(mas->tree, !gaps); |
7348 | offset = ma_meta_gap(mn: node); |
7349 | if (offset > i) { |
7350 | pr_err("gap offset %p[%u] is invalid\n" , node, offset); |
7351 | MT_BUG_ON(mas->tree, 1); |
7352 | } |
7353 | |
7354 | if (gaps[offset] != max_gap) { |
7355 | pr_err("gap %p[%u] is not the largest gap %lu\n" , |
7356 | node, offset, max_gap); |
7357 | MT_BUG_ON(mas->tree, 1); |
7358 | } |
7359 | |
7360 | for (i++ ; i < mt_slot_count(mte); i++) { |
7361 | if (gaps[i] != 0) { |
7362 | pr_err("gap %p[%u] beyond node limit != 0\n" , |
7363 | node, i); |
7364 | MT_BUG_ON(mas->tree, 1); |
7365 | } |
7366 | } |
7367 | } |
7368 | |
7369 | if (mte_is_root(node: mte)) |
7370 | return; |
7371 | |
7372 | p_slot = mte_parent_slot(enode: mas->node); |
7373 | p_mn = mte_parent(enode: mte); |
7374 | MT_BUG_ON(mas->tree, max_gap > mas->max); |
7375 | if (ma_gaps(node: p_mn, type: mas_parent_type(mas, enode: mte))[p_slot] != max_gap) { |
7376 | pr_err("gap %p[%u] != %lu\n" , p_mn, p_slot, max_gap); |
7377 | mt_dump(mas->tree, mt_dump_hex); |
7378 | MT_BUG_ON(mas->tree, 1); |
7379 | } |
7380 | } |
7381 | |
7382 | static void mas_validate_parent_slot(struct ma_state *mas) |
7383 | { |
7384 | struct maple_node *parent; |
7385 | struct maple_enode *node; |
7386 | enum maple_type p_type; |
7387 | unsigned char p_slot; |
7388 | void __rcu **slots; |
7389 | int i; |
7390 | |
7391 | if (mte_is_root(node: mas->node)) |
7392 | return; |
7393 | |
7394 | p_slot = mte_parent_slot(enode: mas->node); |
7395 | p_type = mas_parent_type(mas, enode: mas->node); |
7396 | parent = mte_parent(enode: mas->node); |
7397 | slots = ma_slots(mn: parent, mt: p_type); |
7398 | MT_BUG_ON(mas->tree, mas_mn(mas) == parent); |
7399 | |
7400 | /* Check prev/next parent slot for duplicate node entry */ |
7401 | |
7402 | for (i = 0; i < mt_slots[p_type]; i++) { |
7403 | node = mas_slot(mas, slots, offset: i); |
7404 | if (i == p_slot) { |
7405 | if (node != mas->node) |
7406 | pr_err("parent %p[%u] does not have %p\n" , |
7407 | parent, i, mas_mn(mas)); |
7408 | MT_BUG_ON(mas->tree, node != mas->node); |
7409 | } else if (node == mas->node) { |
7410 | pr_err("Invalid child %p at parent %p[%u] p_slot %u\n" , |
7411 | mas_mn(mas), parent, i, p_slot); |
7412 | MT_BUG_ON(mas->tree, node == mas->node); |
7413 | } |
7414 | } |
7415 | } |
7416 | |
7417 | static void mas_validate_child_slot(struct ma_state *mas) |
7418 | { |
7419 | enum maple_type type = mte_node_type(entry: mas->node); |
7420 | void __rcu **slots = ma_slots(mn: mte_to_node(entry: mas->node), mt: type); |
7421 | unsigned long *pivots = ma_pivots(node: mte_to_node(entry: mas->node), type); |
7422 | struct maple_enode *child; |
7423 | unsigned char i; |
7424 | |
7425 | if (mte_is_leaf(entry: mas->node)) |
7426 | return; |
7427 | |
7428 | for (i = 0; i < mt_slots[type]; i++) { |
7429 | child = mas_slot(mas, slots, offset: i); |
7430 | |
7431 | if (!child) { |
7432 | pr_err("Non-leaf node lacks child at %p[%u]\n" , |
7433 | mas_mn(mas), i); |
7434 | MT_BUG_ON(mas->tree, 1); |
7435 | } |
7436 | |
7437 | if (mte_parent_slot(enode: child) != i) { |
7438 | pr_err("Slot error at %p[%u]: child %p has pslot %u\n" , |
7439 | mas_mn(mas), i, mte_to_node(child), |
7440 | mte_parent_slot(child)); |
7441 | MT_BUG_ON(mas->tree, 1); |
7442 | } |
7443 | |
7444 | if (mte_parent(enode: child) != mte_to_node(entry: mas->node)) { |
7445 | pr_err("child %p has parent %p not %p\n" , |
7446 | mte_to_node(child), mte_parent(child), |
7447 | mte_to_node(mas->node)); |
7448 | MT_BUG_ON(mas->tree, 1); |
7449 | } |
7450 | |
7451 | if (i < mt_pivots[type] && pivots[i] == mas->max) |
7452 | break; |
7453 | } |
7454 | } |
7455 | |
7456 | /* |
7457 | * Validate all pivots are within mas->min and mas->max, check metadata ends |
7458 | * where the maximum ends and ensure there is no slots or pivots set outside of |
7459 | * the end of the data. |
7460 | */ |
7461 | static void mas_validate_limits(struct ma_state *mas) |
7462 | { |
7463 | int i; |
7464 | unsigned long prev_piv = 0; |
7465 | enum maple_type type = mte_node_type(entry: mas->node); |
7466 | void __rcu **slots = ma_slots(mn: mte_to_node(entry: mas->node), mt: type); |
7467 | unsigned long *pivots = ma_pivots(node: mas_mn(mas), type); |
7468 | |
7469 | for (i = 0; i < mt_slots[type]; i++) { |
7470 | unsigned long piv; |
7471 | |
7472 | piv = mas_safe_pivot(mas, pivots, piv: i, type); |
7473 | |
7474 | if (!piv && (i != 0)) { |
7475 | pr_err("Missing node limit pivot at %p[%u]" , |
7476 | mas_mn(mas), i); |
7477 | MAS_WARN_ON(mas, 1); |
7478 | } |
7479 | |
7480 | if (prev_piv > piv) { |
7481 | pr_err("%p[%u] piv %lu < prev_piv %lu\n" , |
7482 | mas_mn(mas), i, piv, prev_piv); |
7483 | MAS_WARN_ON(mas, piv < prev_piv); |
7484 | } |
7485 | |
7486 | if (piv < mas->min) { |
7487 | pr_err("%p[%u] %lu < %lu\n" , mas_mn(mas), i, |
7488 | piv, mas->min); |
7489 | MAS_WARN_ON(mas, piv < mas->min); |
7490 | } |
7491 | if (piv > mas->max) { |
7492 | pr_err("%p[%u] %lu > %lu\n" , mas_mn(mas), i, |
7493 | piv, mas->max); |
7494 | MAS_WARN_ON(mas, piv > mas->max); |
7495 | } |
7496 | prev_piv = piv; |
7497 | if (piv == mas->max) |
7498 | break; |
7499 | } |
7500 | |
7501 | if (mas_data_end(mas) != i) { |
7502 | pr_err("node%p: data_end %u != the last slot offset %u\n" , |
7503 | mas_mn(mas), mas_data_end(mas), i); |
7504 | MT_BUG_ON(mas->tree, 1); |
7505 | } |
7506 | |
7507 | for (i += 1; i < mt_slots[type]; i++) { |
7508 | void *entry = mas_slot(mas, slots, offset: i); |
7509 | |
7510 | if (entry && (i != mt_slots[type] - 1)) { |
7511 | pr_err("%p[%u] should not have entry %p\n" , mas_mn(mas), |
7512 | i, entry); |
7513 | MT_BUG_ON(mas->tree, entry != NULL); |
7514 | } |
7515 | |
7516 | if (i < mt_pivots[type]) { |
7517 | unsigned long piv = pivots[i]; |
7518 | |
7519 | if (!piv) |
7520 | continue; |
7521 | |
7522 | pr_err("%p[%u] should not have piv %lu\n" , |
7523 | mas_mn(mas), i, piv); |
7524 | MAS_WARN_ON(mas, i < mt_pivots[type] - 1); |
7525 | } |
7526 | } |
7527 | } |
7528 | |
7529 | static void mt_validate_nulls(struct maple_tree *mt) |
7530 | { |
7531 | void *entry, *last = (void *)1; |
7532 | unsigned char offset = 0; |
7533 | void __rcu **slots; |
7534 | MA_STATE(mas, mt, 0, 0); |
7535 | |
7536 | mas_start(mas: &mas); |
7537 | if (mas_is_none(mas: &mas) || (mas_is_ptr(mas: &mas))) |
7538 | return; |
7539 | |
7540 | while (!mte_is_leaf(entry: mas.node)) |
7541 | mas_descend(mas: &mas); |
7542 | |
7543 | slots = ma_slots(mn: mte_to_node(entry: mas.node), mt: mte_node_type(entry: mas.node)); |
7544 | do { |
7545 | entry = mas_slot(mas: &mas, slots, offset); |
7546 | if (!last && !entry) { |
7547 | pr_err("Sequential nulls end at %p[%u]\n" , |
7548 | mas_mn(&mas), offset); |
7549 | } |
7550 | MT_BUG_ON(mt, !last && !entry); |
7551 | last = entry; |
7552 | if (offset == mas_data_end(mas: &mas)) { |
7553 | mas_next_node(mas: &mas, node: mas_mn(mas: &mas), ULONG_MAX); |
7554 | if (mas_is_overflow(mas: &mas)) |
7555 | return; |
7556 | offset = 0; |
7557 | slots = ma_slots(mn: mte_to_node(entry: mas.node), |
7558 | mt: mte_node_type(entry: mas.node)); |
7559 | } else { |
7560 | offset++; |
7561 | } |
7562 | |
7563 | } while (!mas_is_overflow(mas: &mas)); |
7564 | } |
7565 | |
7566 | /* |
7567 | * validate a maple tree by checking: |
7568 | * 1. The limits (pivots are within mas->min to mas->max) |
7569 | * 2. The gap is correctly set in the parents |
7570 | */ |
7571 | void mt_validate(struct maple_tree *mt) |
7572 | { |
7573 | unsigned char end; |
7574 | |
7575 | MA_STATE(mas, mt, 0, 0); |
7576 | rcu_read_lock(); |
7577 | mas_start(mas: &mas); |
7578 | if (!mas_is_active(mas: &mas)) |
7579 | goto done; |
7580 | |
7581 | while (!mte_is_leaf(entry: mas.node)) |
7582 | mas_descend(mas: &mas); |
7583 | |
7584 | while (!mas_is_overflow(mas: &mas)) { |
7585 | MAS_WARN_ON(&mas, mte_dead_node(mas.node)); |
7586 | end = mas_data_end(mas: &mas); |
7587 | if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && |
7588 | (mas.max != ULONG_MAX))) { |
7589 | pr_err("Invalid size %u of %p\n" , end, mas_mn(&mas)); |
7590 | } |
7591 | |
7592 | mas_validate_parent_slot(mas: &mas); |
7593 | mas_validate_limits(mas: &mas); |
7594 | mas_validate_child_slot(mas: &mas); |
7595 | if (mt_is_alloc(mt)) |
7596 | mas_validate_gaps(mas: &mas); |
7597 | mas_dfs_postorder(mas: &mas, ULONG_MAX); |
7598 | } |
7599 | mt_validate_nulls(mt); |
7600 | done: |
7601 | rcu_read_unlock(); |
7602 | |
7603 | } |
7604 | EXPORT_SYMBOL_GPL(mt_validate); |
7605 | |
7606 | void mas_dump(const struct ma_state *mas) |
7607 | { |
7608 | pr_err("MAS: tree=%p enode=%p " , mas->tree, mas->node); |
7609 | switch (mas->status) { |
7610 | case ma_active: |
7611 | pr_err("(ma_active)" ); |
7612 | break; |
7613 | case ma_none: |
7614 | pr_err("(ma_none)" ); |
7615 | break; |
7616 | case ma_root: |
7617 | pr_err("(ma_root)" ); |
7618 | break; |
7619 | case ma_start: |
7620 | pr_err("(ma_start) " ); |
7621 | break; |
7622 | case ma_pause: |
7623 | pr_err("(ma_pause) " ); |
7624 | break; |
7625 | case ma_overflow: |
7626 | pr_err("(ma_overflow) " ); |
7627 | break; |
7628 | case ma_underflow: |
7629 | pr_err("(ma_underflow) " ); |
7630 | break; |
7631 | case ma_error: |
7632 | pr_err("(ma_error) " ); |
7633 | break; |
7634 | } |
7635 | |
7636 | pr_err("[%u/%u] index=%lx last=%lx\n" , mas->offset, mas->end, |
7637 | mas->index, mas->last); |
7638 | pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n" , |
7639 | mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); |
7640 | if (mas->index > mas->last) |
7641 | pr_err("Check index & last\n" ); |
7642 | } |
7643 | EXPORT_SYMBOL_GPL(mas_dump); |
7644 | |
7645 | void mas_wr_dump(const struct ma_wr_state *wr_mas) |
7646 | { |
7647 | pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n" , |
7648 | wr_mas->node, wr_mas->r_min, wr_mas->r_max); |
7649 | pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n" , |
7650 | wr_mas->type, wr_mas->offset_end, wr_mas->mas->end, |
7651 | wr_mas->end_piv); |
7652 | } |
7653 | EXPORT_SYMBOL_GPL(mas_wr_dump); |
7654 | |
7655 | #endif /* CONFIG_DEBUG_MAPLE_TREE */ |
7656 | |