/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BTREE_H__
#define __XFS_BTREE_H__

struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;
struct xfs_perag;

/*
 * Generic key, ptr and record wrapper structures.
 *
 * These are disk format structures, and are converted where necessary
 * by the btree specific code that needs to interpret them.
 */
union xfs_btree_ptr {
	__be32			s;	/* short form ptr */
	__be64			l;	/* long form ptr */
};

/*
 * The in-core btree key.  Overlapping btrees actually store two keys
 * per pointer, so we reserve enough memory to hold both.  The __*bigkey
 * items should never be accessed directly.
 */
union xfs_btree_key {
	struct xfs_bmbt_key		bmbt;
	xfs_bmdr_key_t			bmbr;	/* bmbt root block */
	xfs_alloc_key_t			alloc;
	struct xfs_inobt_key		inobt;
	struct xfs_rmap_key		rmap;
	struct xfs_rmap_key		__rmap_bigkey[2];
	struct xfs_refcount_key		refc;
};

union xfs_btree_rec {
	struct xfs_bmbt_rec		bmbt;
	xfs_bmdr_rec_t			bmbr;	/* bmbt root block */
	struct xfs_alloc_rec		alloc;
	struct xfs_inobt_rec		inobt;
	struct xfs_rmap_rec		rmap;
	struct xfs_refcount_rec		refc;
};

/*
 * This nonsense is to make -wlint happy.
 */
#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)

struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);

/*
 * For logging record fields.
 */
#define	XFS_BB_MAGIC		(1u << 0)
#define	XFS_BB_LEVEL		(1u << 1)
#define	XFS_BB_NUMRECS		(1u << 2)
#define	XFS_BB_LEFTSIB		(1u << 3)
#define	XFS_BB_RIGHTSIB		(1u << 4)
#define	XFS_BB_BLKNO		(1u << 5)
#define	XFS_BB_LSN		(1u << 6)
#define	XFS_BB_UUID		(1u << 7)
#define	XFS_BB_OWNER		(1u << 8)
#define	XFS_BB_NUM_BITS		5
#define	XFS_BB_ALL_BITS		((1u << XFS_BB_NUM_BITS) - 1)
#define	XFS_BB_NUM_BITS_CRC	9
#define	XFS_BB_ALL_BITS_CRC	((1u << XFS_BB_NUM_BITS_CRC) - 1)

/*
 * Generic stats interface
 */
#define XFS_BTREE_STATS_INC(cur, stat)	\
	XFS_STATS_INC_OFF((cur)->bc_mp, \
			(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val)	\
	XFS_STATS_ADD_OFF((cur)->bc_mp, \
			(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
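
/*
 * Usage sketch (illustrative, not part of this header): the generic btree
 * code bumps the per-btree counters through the cursor, e.g.
 *
 *	XFS_BTREE_STATS_INC(cur, lookup);
 *	XFS_BTREE_STATS_ADD(cur, moves, nrecs);
 *
 * The "lookup" and "moves" names are pasted onto the __XBTS_ prefix, so they
 * must match counters declared in xfs_stats.h; bc_ops->statoff selects this
 * btree type's slice of the stats array.
 */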

enum xbtree_key_contig {
	XBTREE_KEY_GAP = 0,
	XBTREE_KEY_CONTIGUOUS,
	XBTREE_KEY_OVERLAP,
};

/*
 * Decide if these two numeric btree key fields are contiguous, overlapping,
 * or if there's a gap between them.  @x should be the field from the high
 * key and @y should be the field from the low key.
 */
static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
{
	x++;
	if (x < y)
		return XBTREE_KEY_GAP;
	if (x == y)
		return XBTREE_KEY_CONTIGUOUS;
	return XBTREE_KEY_OVERLAP;
}
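
/*
 * Worked example: for a record whose high key field is 9 followed by a record
 * whose low key field is 10, xbtree_key_contig(9, 10) increments 9 to 10,
 * sees 10 == 10 and returns XBTREE_KEY_CONTIGUOUS.  xbtree_key_contig(9, 12)
 * returns XBTREE_KEY_GAP, and xbtree_key_contig(9, 8) returns
 * XBTREE_KEY_OVERLAP.
 */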

#define XFS_BTREE_LONG_PTR_LEN		(sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN		(sizeof(__be32))

enum xfs_btree_type {
	XFS_BTREE_TYPE_AG,
	XFS_BTREE_TYPE_INODE,
	XFS_BTREE_TYPE_MEM,
};

struct xfs_btree_ops {
	const char		*name;

	/* Type of btree - AG-rooted, inode-rooted, or in-memory */
	enum xfs_btree_type	type;

	/* XFS_BTGEO_* flags that determine the geometry of the btree */
	unsigned int		geom_flags;

	/* size of the key, pointer, and record structures */
	size_t			key_len;
	size_t			ptr_len;
	size_t			rec_len;

	/* LRU refcount to set on each btree buffer created */
	unsigned int		lru_refs;

	/* offset of btree stats array */
	unsigned int		statoff;

	/* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */
	unsigned int		sick_mask;

	/* cursor operations */
	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
	void	(*update_cursor)(struct xfs_btree_cur *src,
				 struct xfs_btree_cur *dst);

	/* update btree root pointer */
	void	(*set_root)(struct xfs_btree_cur *cur,
			    const union xfs_btree_ptr *nptr, int level_change);

	/* block allocation / freeing */
	int	(*alloc_block)(struct xfs_btree_cur *cur,
			       const union xfs_btree_ptr *start_bno,
			       union xfs_btree_ptr *new_bno,
			       int *stat);
	int	(*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);

	/* update last record information */
	void	(*update_lastrec)(struct xfs_btree_cur *cur,
				  const struct xfs_btree_block *block,
				  const union xfs_btree_rec *rec,
				  int ptr, int reason);

	/* records in block/level */
	int	(*get_minrecs)(struct xfs_btree_cur *cur, int level);
	int	(*get_maxrecs)(struct xfs_btree_cur *cur, int level);

	/* records on disk; only matters for the root block in the inode-rooted case */
	int	(*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);

	/* init values of btree structures */
	void	(*init_key_from_rec)(union xfs_btree_key *key,
				     const union xfs_btree_rec *rec);
	void	(*init_rec_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_rec *rec);
	void	(*init_ptr_from_cur)(struct xfs_btree_cur *cur,
				     union xfs_btree_ptr *ptr);
	void	(*init_high_key_from_rec)(union xfs_btree_key *key,
					  const union xfs_btree_rec *rec);

	/* difference between key value and cursor value */
	int64_t	(*key_diff)(struct xfs_btree_cur *cur,
			    const union xfs_btree_key *key);

	/*
	 * Difference between key1 and key2 -- positive if key1 > key2,
	 * negative if key1 < key2, and zero if equal.  If the @mask parameter
	 * is non NULL, each key field to be used in the comparison must
	 * contain a nonzero value.
	 */
	int64_t	(*diff_two_keys)(struct xfs_btree_cur *cur,
				 const union xfs_btree_key *key1,
				 const union xfs_btree_key *key2,
				 const union xfs_btree_key *mask);

	const struct xfs_buf_ops	*buf_ops;

	/* check that k1 is lower than k2 */
	int	(*keys_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *k1,
				const union xfs_btree_key *k2);

	/* check that r1 is lower than r2 */
	int	(*recs_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_rec *r1,
				const union xfs_btree_rec *r2);

	/*
	 * Are these two btree keys immediately adjacent?
	 *
	 * Given two btree keys @key1 and @key2, decide if it is impossible for
	 * there to be a third btree key K satisfying the relationship
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
			       const union xfs_btree_key *key1,
			       const union xfs_btree_key *key2,
			       const union xfs_btree_key *mask);
};
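
/*
 * Sketch of an ops table for a hypothetical short-pointer, AG-rooted btree.
 * The "foo" names and field values are illustrative only; the real tables
 * live in xfs_alloc_btree.c, xfs_ialloc_btree.c, xfs_bmap_btree.c, etc.
 *
 *	static const struct xfs_btree_ops xfs_foobt_ops = {
 *		.name			= "foo",
 *		.type			= XFS_BTREE_TYPE_AG,
 *		.key_len		= sizeof(struct xfs_foo_key),
 *		.ptr_len		= XFS_BTREE_SHORT_PTR_LEN,
 *		.rec_len		= sizeof(struct xfs_foo_rec),
 *		.lru_refs		= 1,
 *		.dup_cursor		= xfs_foobt_dup_cursor,
 *		.alloc_block		= xfs_foobt_alloc_block,
 *		.free_block		= xfs_foobt_free_block,
 *		.get_minrecs		= xfs_foobt_get_minrecs,
 *		.get_maxrecs		= xfs_foobt_get_maxrecs,
 *		.init_key_from_rec	= xfs_foobt_init_key_from_rec,
 *		.init_rec_from_cur	= xfs_foobt_init_rec_from_cur,
 *		.init_ptr_from_cur	= xfs_foobt_init_ptr_from_cur,
 *		.key_diff		= xfs_foobt_key_diff,
 *		.diff_two_keys		= xfs_foobt_diff_two_keys,
 *		.keys_inorder		= xfs_foobt_keys_inorder,
 *		.recs_inorder		= xfs_foobt_recs_inorder,
 *		.buf_ops		= &xfs_foobt_buf_ops,
 *	};
 */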

/* btree geometry flags */
#define XFS_BTGEO_LASTREC_UPDATE	(1U << 0) /* track last rec externally */
#define XFS_BTGEO_OVERLAPPING		(1U << 1) /* overlapping intervals */

/*
 * Reasons for the update_lastrec method to be called.
 */
#define LASTREC_UPDATE	0
#define LASTREC_INSREC	1
#define LASTREC_DELREC	2


union xfs_btree_irec {
	struct xfs_alloc_rec_incore	a;
	struct xfs_bmbt_irec		b;
	struct xfs_inobt_rec_incore	i;
	struct xfs_rmap_irec		r;
	struct xfs_refcount_irec	rc;
};

struct xfs_btree_level {
	/* buffer pointer */
	struct xfs_buf		*bp;

	/* key/record number */
	uint16_t		ptr;

	/* readahead info */
#define XFS_BTCUR_LEFTRA	(1 << 0) /* left sibling has been read-ahead */
#define XFS_BTCUR_RIGHTRA	(1 << 1) /* right sibling has been read-ahead */
	uint16_t		ra;
};

/*
 * Btree cursor structure.
 * This collects all information needed by the btree code in one place.
 */
struct xfs_btree_cur
{
	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
	struct xfs_mount	*bc_mp;	/* file system mount struct */
	const struct xfs_btree_ops *bc_ops;
	struct kmem_cache	*bc_cache; /* cursor cache */
	unsigned int		bc_flags; /* btree features - below */
	union xfs_btree_irec	bc_rec;	/* current insert/search record value */
	uint8_t			bc_nlevels; /* number of levels in the tree */
	uint8_t			bc_maxlevels; /* maximum levels for this btree type */

	/* per-type information */
	union {
		struct {
			struct xfs_inode	*ip;
			short			forksize;
			char			whichfork;
			struct xbtree_ifakeroot	*ifake;	/* for staging cursor */
		} bc_ino;
		struct {
			struct xfs_perag	*pag;
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* for staging cursor */
		} bc_ag;
		struct {
			struct xfbtree		*xfbtree;
			struct xfs_perag	*pag;
		} bc_mem;
	};

	/* per-format private data */
	union {
		struct {
			int		allocated;
		} bc_bmap;	/* bmapbt */
		struct {
			unsigned int	nr_ops;		/* # record updates */
			unsigned int	shape_changes;	/* # of extent splits */
		} bc_refc;	/* refcountbt */
	};

	/* Must be at the end of the struct! */
	struct xfs_btree_level	bc_levels[];
};

/*
 * Compute the size of a btree cursor that can handle a btree of a given
 * height.  The bc_levels array handles node and leaf blocks, so its size
 * is exactly nlevels.
 */
static inline size_t
xfs_btree_cur_sizeof(unsigned int nlevels)
{
	return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
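
/*
 * Usage sketch (illustrative; the "foo" names are placeholders): the
 * per-btree cursor caches are sized with this helper for the worst-case
 * on-disk height,
 *
 *	cache = kmem_cache_create("xfs_foobt_cur",
 *			xfs_btree_cur_sizeof(xfs_foobt_maxlevels_ondisk()),
 *			0, 0, NULL);
 *
 * which evaluates to sizeof(struct xfs_btree_cur) plus maxlevels copies of
 * struct xfs_btree_level.
 */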

/* cursor state flags */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1U << 0)

/* We are converting a delalloc reservation (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_WASDEL		(1U << 1)

/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define	XFS_BTREE_BMBT_INVALID_OWNER	(1U << 2)

/* Cursor is active (only for allocbt btrees) */
#define	XFS_BTREE_ALLOCBT_ACTIVE	(1U << 3)

#define	XFS_BTREE_NOERROR	0
#define	XFS_BTREE_ERROR		1

/*
 * Convert from buffer to btree block header.
 */
#define	XFS_BUF_TO_BLOCK(bp)	((struct xfs_btree_block *)((bp)->b_addr))

xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int index, int level);

/*
 * Check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp);	/* buffer containing block, if any */

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			error);	/* del because of error */

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur	*cur,	/* input cursor */
	struct xfs_btree_cur	**ncur);/* output cursor */

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t		fields,	/* bitmask of fields */
	const short		*offsets, /* table of field offsets */
	int			nbits,	/* number of bits to inspect */
	int			*first,	/* output: first byte offset */
	int			*last);	/* output: last byte offset */

/*
 * Initialise a new btree block header
 */
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
		const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
		__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
		struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
		__u16 level, __u16 numrecs, __u64 owner);

/*
 * Common btree core entry points.
 */
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
			   struct list_head *buffer_list);
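
/*
 * Usage sketch (illustrative, not part of this header): walking records
 * forward from a starting key.  A btree-specific helper fills in cur->bc_rec
 * before the lookup; @stat is 1 while the cursor points at a valid record.
 *
 *	union xfs_btree_rec	*rec;
 *	int			stat;
 *	int			error;
 *
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &stat);
 *	while (!error && stat) {
 *		error = xfs_btree_get_rec(cur, &rec, &stat);
 *		if (error || !stat)
 *			break;
 *		// decode rec with the btree-specific conversion helpers
 *		error = xfs_btree_increment(cur, 0, &stat);
 *	}
 */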

/*
 * btree block CRC helpers
 */
void xfs_btree_fsblock_calc_crc(struct xfs_buf *);
bool xfs_btree_fsblock_verify_crc(struct xfs_buf *);
void xfs_btree_agblock_calc_crc(struct xfs_buf *);
bool xfs_btree_agblock_verify_crc(struct xfs_buf *);

/*
 * Internal btree helpers also used by xfs_bmap.c.
 */
void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, uint32_t);
void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);
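
/*
 * Usage sketch (illustrative): after modifying a btree block, callers log
 * only the header fields that changed, using the XFS_BB_* bits above, and
 * log a run of records by index, e.g.
 *
 *	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
 *	xfs_btree_log_recs(cur, bp, 1, numrecs);
 */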

/*
 * Helpers.
 */
static inline int xfs_btree_get_numrecs(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_numrecs);
}

static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
		uint16_t numrecs)
{
	block->bb_numrecs = cpu_to_be16(numrecs);
}

static inline int xfs_btree_get_level(const struct xfs_btree_block *block)
{
	return be16_to_cpu(block->bb_level);
}


/*
 * Min and max functions for extlen, agblock, fileoff, and filblks types.
 */
#define	XFS_EXTLEN_MIN(a,b)	min_t(xfs_extlen_t, (a), (b))
#define	XFS_EXTLEN_MAX(a,b)	max_t(xfs_extlen_t, (a), (b))
#define	XFS_AGBLOCK_MIN(a,b)	min_t(xfs_agblock_t, (a), (b))
#define	XFS_AGBLOCK_MAX(a,b)	max_t(xfs_agblock_t, (a), (b))
#define	XFS_FILEOFF_MIN(a,b)	min_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILEOFF_MAX(a,b)	max_t(xfs_fileoff_t, (a), (b))
#define	XFS_FILBLKS_MIN(a,b)	min_t(xfs_filblks_t, (a), (b))
#define	XFS_FILBLKS_MAX(a,b)	max_t(xfs_filblks_t, (a), (b))

xfs_failaddr_t xfs_btree_agblock_v5hdr_verify(struct xfs_buf *bp);
xfs_failaddr_t xfs_btree_agblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_fsblock_v5hdr_verify(struct xfs_buf *bp,
		uint64_t owner);
xfs_failaddr_t xfs_btree_fsblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);
xfs_failaddr_t xfs_btree_memblock_verify(struct xfs_buf *bp,
		unsigned int max_recs);

unsigned int xfs_btree_compute_maxlevels(const unsigned int *limits,
		unsigned long long records);
unsigned long long xfs_btree_calc_size(const unsigned int *limits,
		unsigned long long records);
unsigned int xfs_btree_space_to_height(const unsigned int *limits,
		unsigned long long blocks);

/*
 * Return codes for the query range iterator function are 0 to continue
 * iterating, and non-zero to stop iterating.  Any non-zero value will be
 * passed up to the _query_range caller.  The special value -ECANCELED can be
 * used to stop iteration, because _query_range never generates that error
 * code on its own.
 */
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_rec *rec, void *priv);

int xfs_btree_query_range(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low_rec,
		const union xfs_btree_irec *high_rec,
		xfs_btree_query_range_fn fn, void *priv);
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
		void *priv);
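
/*
 * Usage sketch (illustrative; the callback and counter names are
 * hypothetical): counting every record in a btree with xfs_btree_query_all().
 * Returning -ECANCELED from the callback would stop the walk early, per the
 * comment above.
 *
 *	static int xfs_count_recs_fn(struct xfs_btree_cur *cur,
 *			const union xfs_btree_rec *rec, void *priv)
 *	{
 *		unsigned long long	*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long long	count = 0;
 *	int			error;
 *
 *	error = xfs_btree_query_all(cur, xfs_count_recs_fn, &count);
 */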

typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
		void *data);
/* Visit record blocks. */
#define XFS_BTREE_VISIT_RECORDS		(1 << 0)
/* Visit leaf blocks. */
#define XFS_BTREE_VISIT_LEAVES		(1 << 1)
/* Visit all blocks. */
#define XFS_BTREE_VISIT_ALL		(XFS_BTREE_VISIT_RECORDS | \
					 XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
		xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);
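
/*
 * Usage sketch (illustrative; the callback name is hypothetical): visiting
 * every block of a btree to tally blocks per level.
 *
 *	static int xfs_tally_blocks_fn(struct xfs_btree_cur *cur, int level,
 *			void *data)
 *	{
 *		unsigned int	*per_level = data;
 *
 *		per_level[level]++;
 *		return 0;
 *	}
 *
 *	error = xfs_btree_visit_blocks(cur, xfs_tally_blocks_fn,
 *			XFS_BTREE_VISIT_ALL, per_level);
 */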

int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);

union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
		struct xfs_btree_block *block);
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
		const union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
		int level, struct xfs_buf **bpp);
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr);
int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *a,
		const union xfs_btree_ptr *b);
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block,
		union xfs_btree_ptr *ptr, int lr);
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
		union xfs_btree_key *key);
typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_key *key1,
		const union xfs_btree_key *key2);

int xfs_btree_has_records(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low,
		const union xfs_btree_irec *high,
		const union xfs_btree_key *mask,
		enum xbtree_recpacking *outcome);

bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);

/* Key comparison helpers */
static inline bool
xfs_btree_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
}

static inline bool
xfs_btree_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
}

static inline bool
xfs_btree_keycmp_eq(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
}

static inline bool
xfs_btree_keycmp_le(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_gt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_lt(cur, key1, key2);
}

static inline bool
xfs_btree_keycmp_ne(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return !xfs_btree_keycmp_eq(cur, key1, key2);
}
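
/*
 * Usage sketch (illustrative): testing whether @key lies inside the closed
 * range [@low_key, @high_key] with the helpers above, in the style of the
 * range query code.
 *
 *	inside = xfs_btree_keycmp_ge(cur, key, low_key) &&
 *		 xfs_btree_keycmp_le(cur, key, high_key);
 */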

/* Masked key comparison helpers */
static inline bool
xfs_btree_masked_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
}

static inline bool
xfs_btree_masked_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
}

static inline bool
xfs_btree_masked_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
}

/* Does this cursor point to the last block in the given level? */
static inline bool
xfs_btree_islastblock(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);

	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}

void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, struct xfs_btree_block **block,
		struct xfs_buf **bpp);
int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int flags,
		struct xfs_btree_block **block, struct xfs_buf **bpp);
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, const union xfs_btree_ptr *ptr,
		int lr);
void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
		struct xfs_buf *bp, int level, int numrecs);
void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *dst_ptr,
		const union xfs_btree_ptr *src_ptr, int numptrs);
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
		union xfs_btree_key *dst_key,
		const union xfs_btree_key *src_key, int numkeys);
void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);

static inline struct xfs_btree_cur *
xfs_btree_alloc_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	const struct xfs_btree_ops *ops,
	uint8_t			maxlevels,
	struct kmem_cache	*cache)
{
	struct xfs_btree_cur	*cur;

	ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
	       ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);

	/* BMBT allocations can come through from non-transactional context. */
	cur = kmem_cache_zalloc(cache,
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
	cur->bc_ops = ops;
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_maxlevels = maxlevels;
	cur->bc_cache = cache;

	return cur;
}
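
/*
 * Usage sketch (illustrative; the "foo" ops, cache, and maxlevels helper are
 * placeholders): a btree-specific constructor wraps xfs_btree_alloc_cursor()
 * and then fills in the matching per-type union member, roughly as the real
 * *_init_cursor() helpers do.
 *
 *	struct xfs_btree_cur *
 *	xfs_foobt_init_cursor(struct xfs_mount *mp, struct xfs_trans *tp,
 *			struct xfs_perag *pag)
 *	{
 *		struct xfs_btree_cur	*cur;
 *
 *		cur = xfs_btree_alloc_cursor(mp, tp, &xfs_foobt_ops,
 *				xfs_foobt_maxlevels(mp), xfs_foobt_cur_cache);
 *		cur->bc_ag.pag = pag;
 *		return cur;
 *	}
 */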

int __init xfs_btree_init_cur_caches(void);
void xfs_btree_destroy_cur_caches(void);

int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);

/* Does this level of the cursor point to the inode root (and not a block)? */
static inline bool
xfs_btree_at_iroot(
	const struct xfs_btree_cur	*cur,
	int				level)
{
	return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
	       level == cur->bc_nlevels - 1;
}

#endif	/* __XFS_BTREE_H__ */