/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 * 257: Zero entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
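
/*
 * Example (an illustrative sketch; the array and variable names are
 * hypothetical): round-tripping a small integer through the value-entry
 * encoding. The integer must fit in BITS_PER_XA_VALUE bits.
 *
 *	DEFINE_XARRAY(counters);
 *
 *	xa_store(&counters, index, xa_mk_value(42), GFP_KERNEL);
 *	entry = xa_load(&counters, index);
 *	if (xa_is_value(entry))
 *		count = xa_to_value(entry);
 *
 * count is now 42; a plain pointer stored at the same index would have
 * failed the xa_is_value() test instead.
 */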

/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries. Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}
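
/*
 * Example (an illustrative sketch with hypothetical names): storing two
 * kinds of object in one array and telling them apart by tag. The
 * pointer must be at least 4-byte aligned so the tag fits in the low
 * bits; tag 2 is unavailable because it would make the entry look like
 * an internal entry.
 *
 *	xa_store(&objects, index, xa_tag_pointer(widget, 1), GFP_KERNEL);
 *	...
 *	entry = xa_load(&objects, index);
 *	if (xa_pointer_tag(entry) == 1)
 *		widget = xa_untag_pointer(entry);
 */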

/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes. Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code). 256
 * is used for the retry entry. 257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos. Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry. You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}
/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error. This function tells you whether an error
 * occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno. This function extracts the
 * errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
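
/*
 * Example (an illustrative sketch): checking the result of a store.
 * xa_store() returns either the displaced entry or an encoded errno,
 * so test it with xa_err() rather than IS_ERR():
 *
 *	curr = xa_store(&array, index, item, GFP_KERNEL);
 *	if (xa_err(curr))
 *		return xa_err(curr);
 *
 * A failed allocation, for instance, returns an entry for which
 * xa_err() yields -ENOMEM.
 */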

/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Two common ranges are predefined for you:
 * * xa_limit_32b - [0 - UINT_MAX]
 * * xa_limit_31b - [0 - INT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
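
/*
 * Example (an illustrative sketch): passing a custom range, or one of
 * the predefined limits, to an allocating store:
 *
 *	err = xa_alloc(&array, &id, item, XA_LIMIT(1, 99), GFP_KERNEL);
 *	err = xa_alloc(&array, &id, item, xa_limit_31b, GFP_KERNEL);
 *
 * The first call only hands out IDs between 1 and 99 inclusive; the
 * second allows any ID that fits in a positive int.
 */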

typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))
/* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry. If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name and flags. It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compile time instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)				\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name. It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compile time instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
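
/*
 * Example (an illustrative sketch): a file-scope array usable without
 * any runtime initialisation:
 *
 *	static DEFINE_XARRAY(my_cache);
 *
 *	void *lookup(unsigned long index)
 *	{
 *		return xa_load(&my_cache, index);
 *	}
 */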

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)

void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
			void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
		unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start)			\
	for (index = start,						\
	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
	     entry;							\
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))

/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you want
 * to skip or reprocess indices. It is safe to modify the array during the
 * iteration. At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
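
/*
 * Example (an illustrative sketch): visiting every present entry.
 * @index and @entry are ordinary variables owned by the caller:
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each(&array, index, entry)
 *		pr_info("index %lu: entry %px\n", index, entry);
 */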

/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. The iteration will skip all entries in the array
 * which do not match @filter. You may modify @index during the iteration
 * if you want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each(), and if you have
 * to unlock after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead. The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))

#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
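
/*
 * Example (an illustrative sketch): making two updates appear atomic
 * to other threads by holding the xa_lock across both:
 *
 *	xa_lock(&array);
 *	__xa_erase(&array, old_index);
 *	__xa_store(&array, new_index, item, GFP_ATOMIC);
 *	xa_unlock(&array);
 *
 * GFP_ATOMIC is chosen here so __xa_store() never drops the lock to
 * allocate; with a sleeping GFP the lock may be dropped and retaken,
 * and the two updates would no longer appear atomic.
 */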

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}
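
/*
 * Example (an illustrative sketch): install @item only if the slot is
 * currently empty, without losing a concurrent store:
 *
 *	curr = xa_cmpxchg(&array, index, NULL, item, GFP_KERNEL);
 *	if (xa_err(curr))
 *		return xa_err(curr);
 *	if (curr)
 *		... another thread won the race; curr is its entry ...
 *
 * If failing on reserved entries is also wanted, xa_insert() below
 * packages this pattern and returns -EBUSY instead.
 */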

/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}
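
/*
 * Example (an illustrative sketch): letting the first caller claim an
 * index:
 *
 *	err = xa_insert(&array, index, item, GFP_KERNEL);
 *	if (err == -EBUSY)
 *		... index already claimed by someone else ...
 *	else if (err == -ENOMEM)
 *		... memory allocation failed ...
 */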

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
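
/*
 * Example (an illustrative sketch; struct session and its @id member
 * are hypothetical): allocating an ID for a new object, much as an IDR
 * would:
 *
 *	static DEFINE_XARRAY_ALLOC(sessions);
 *
 *	int new_session(struct session *s)
 *	{
 *		return xa_alloc(&sessions, &s->id, s, xa_limit_32b,
 *				GFP_KERNEL);
 *	}
 */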

/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err;
}
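
/*
 * Example (an illustrative sketch): cyclic allocation avoids promptly
 * reusing a just-freed ID. @next_id persists between calls:
 *
 *	static u32 next_id;
 *
 *	err = xa_alloc_cyclic(&array, &id, item, xa_limit_31b, &next_id,
 *			GFP_KERNEL);
 *	if (err < 0)
 *		return err;
 *
 * A return of 1 is not an error; it only reports that the search
 * wrapped past @limit.max back to @limit.min.
 */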

/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation. If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
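
/*
 * Example (an illustrative sketch): reserve an index while sleeping
 * allocations are still possible, then fill it in, or back out, from a
 * context that must not fail:
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	if (success)
 *		xa_store(&array, index, item, GFP_NOWAIT);
 *	else
 *		xa_release(&array, index);
 *
 * The later store needs no allocation because the slot already exists,
 * so GFP_NOWAIT is sufficient.
 */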

/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
};

/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL				\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)
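
/*
 * Example (an illustrative sketch): an advanced-API lookup equivalent
 * to xa_load(), coping with retry entries under the RCU lock:
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */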

/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)

#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)

/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);

void xas_create_range(struct xa_state *);
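
/*
 * Example (an illustrative sketch): a store with the advanced API,
 * using xas_nomem() to allocate memory without holding the lock:
 *
 *	XA_STATE(xas, &array, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 *
 * xas_nomem() returns true only if memory was needed and was
 * successfully allocated, in which case the store is retried.
 */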
1471 | |
1472 | /** |
1473 | * xas_reload() - Refetch an entry from the xarray. |
1474 | * @xas: XArray operation state. |
1475 | * |
1476 | * Use this function to check that a previously loaded entry still has |
1477 | * the same value. This is useful for the lockless pagecache lookup where |
1478 | * we walk the array with only the RCU lock to protect us, lock the page, |
1479 | * then check that the page hasn't moved since we looked it up. |
1480 | * |
1481 | * The caller guarantees that @xas is still valid. If it may be in an |
1482 | * error or restart state, call xas_load() instead. |
1483 | * |
1484 | * Return: The entry at this location in the xarray. |
1485 | */ |
1486 | static inline void *xas_reload(struct xa_state *xas) |
1487 | { |
1488 | struct xa_node *node = xas->xa_node; |
1489 | |
1490 | if (node) |
1491 | return xa_entry(xas->xa, node, xas->xa_offset); |
1492 | return xa_head(xas->xa); |
1493 | } |
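
/*
 * Example (editor's sketch): the lockless-lookup pattern described
 * above. my_obj_tryget() and my_obj_put() are hypothetical stand-ins
 * for the caller's reference-counting scheme.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 * repeat:
 *	xas_reset(&xas);
 *	obj = xas_load(&xas);
 *	if (xas_retry(&xas, obj))
 *		goto repeat;
 *	if (obj && !my_obj_tryget(obj))
 *		goto repeat;
 *	if (obj && obj != xas_reload(&xas)) {
 *		my_obj_put(obj);
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */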

/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}
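
/*
 * Example (editor's sketch; "my_xa" is a placeholder): revisit a
 * distant index with the same xa_state instead of declaring a second
 * one; the second load walks from the root again.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *first, *second;
 *
 *	rcu_read_lock();
 *	first = xas_load(&xas);
 *	xas_set(&xas, 1000);
 *	second = xas_load(&xas);
 *	rcu_read_unlock();
 */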

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}
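
/*
 * Example (editor's sketch, requires CONFIG_XARRAY_MULTI; "my_xa" and
 * "item" are placeholders): store a single entry covering indices
 * 64-127, i.e. order 6; index 100 is rounded down to the start of the
 * range.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *
 *	xas_set_order(&xas, 100, 6);
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */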

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}
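
/*
 * Example (editor's sketch): a callback matching xa_update_node_t;
 * "my_node_update" and "my_xa" are illustrative. The page cache uses
 * this hook to track nodes as their contents change.
 *
 *	static void my_node_update(struct xa_node *node)
 *	{
 *		pr_debug("node has %d entries\n", node->count);
 *	}
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	xas_set_update(&xas, my_node_update);
 */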

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
								xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	return xa_entry(xas->xa, node, offset);
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};
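
/*
 * Example (editor's sketch; process_entry() and "my_xa" are
 * placeholders): a long locked iteration which yields the lock and the
 * CPU every XA_CHECK_SCHED entries.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *	unsigned int i = 0;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process_entry(entry);
 *		if (++i % XA_CHECK_SCHED)
 *			continue;
 *		xas_pause(&xas);
 *		xas_unlock(&xas);
 *		cond_resched();
 *		xas_lock(&xas);
 *	}
 *	xas_unlock(&xas);
 */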

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))
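
/*
 * Example (editor's sketch; "my_xa" is a placeholder): count the
 * present entries below index 128, skipping internal retry entries.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *	unsigned int count = 0;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, 127) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		count++;
 *	}
 *	rcu_read_unlock();
 */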

/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))
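
/*
 * Example (editor's sketch; "my_xa" is a placeholder): visit only the
 * entries marked with XA_MARK_0 and clear the mark as we go. Clearing
 * marks modifies the tree, so the xa_lock must be held.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 *		xas_clear_mark(&xas, XA_MARK_0);
 *	xas_unlock(&xas);
 */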

/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that lies
 * within the range specified by @xas. If the loop completes successfully,
 * any entries that lie in this range will be replaced by @entry. The caller
 * may break out of the loop; if they do so, the contents of the XArray will
 * be unchanged. The operation may fail due to an out of memory condition.
 * The caller may also call xas_set_err() to exit the loop while setting an
 * error to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))
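
/*
 * Example (editor's sketch; "my_xa", "old" and "new" are placeholders,
 * and the xas_nomem() retry loop is omitted for brevity): replace the
 * range with "new" only if every conflicting entry is "old", otherwise
 * fail with -EBUSY.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *curr;
 *
 *	xas_lock(&xas);
 *	xas_for_each_conflict(&xas, curr) {
 *		if (curr != old) {
 *			xas_set_err(&xas, -EBUSY);
 *			break;
 *		}
 *	}
 *	if (!xas_error(&xas))
 *		xas_store(&xas, new);
 *	xas_unlock(&xas);
 */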

void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}
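
/*
 * Example (editor's sketch; "my_xa" is a placeholder): step through
 * three adjacent indices without restarting the walk each time. The
 * returned entries may be %NULL or internal; use xas_retry() if needed.
 *
 *	XA_STATE(xas, &my_xa, 5);
 *	void *a, *b, *c;
 *
 *	rcu_read_lock();
 *	a = xas_load(&xas);	- index 5
 *	b = xas_next(&xas);	- index 6
 *	c = xas_next(&xas);	- index 7
 *	rcu_read_unlock();
 */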

#endif /* _LINUX_XARRAY_H */