// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
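
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller of the advanced API walks to an entry with xas_load() under the
 * RCU read lock and restarts the walk if it races with a node being freed.
 * The function name is hypothetical.
 */
static __maybe_unused void *xas_load_example(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));	/* skip XA_RETRY_ENTRY */
	rcu_read_unlock();

	return entry;
}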

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
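
/*
 * Example (illustrative sketch, not part of the original file): the retry
 * pattern described above.  The store runs under the xa_lock; if it fails
 * with -ENOMEM, xas_nomem() allocates a node with @gfp after the lock has
 * been dropped and the loop retries.  The function name is hypothetical.
 */
static __maybe_unused void *xas_store_retry_example(struct xarray *xa,
		unsigned long index, void *item, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}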

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
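
/*
 * Example (illustrative sketch, not part of the original file): the
 * advanced API amortises the locking cost over several operations by
 * repositioning the state with xas_set().  Erasing never allocates
 * memory, so no xas_nomem() retry loop is needed.  The function name
 * is hypothetical.
 */
static __maybe_unused void xas_erase_pair_example(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock(&xas);
	xas_store(&xas, NULL);		/* erase @index */
	xas_set(&xas, index + 1);
	xas_store(&xas, NULL);		/* erase @index + 1 */
	xas_unlock(&xas);
}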

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);
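
/*
 * Example (illustrative sketch, not part of the original file): marks are
 * applied to an entry the state has already been walked to, so a caller
 * loads first and then marks.  XA_MARK_0 is one of the generic mark
 * numbers; the function name is hypothetical.
 */
static __maybe_unused void xas_mark_example(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock(&xas);
	if (xas_load(&xas))
		xas_set_mark(&xas, XA_MARK_0);
	xas_unlock(&xas);
}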

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
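
/*
 * Example (illustrative sketch, not part of the original file): dropping
 * the lock in the middle of an iteration.  xas_pause() is called before
 * unlocking so the walk resumes correctly once the lock is retaken.
 * @process is a hypothetical caller-supplied function which may sleep.
 */
static __maybe_unused void xas_pause_example(struct xarray *xa,
		void (*process)(void *entry))
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_pause(&xas);
		xas_unlock(&xas);
		process(entry);
		xas_lock(&xas);
	}
	xas_unlock(&xas);
}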

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
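
/*
 * Example (illustrative sketch, not part of the original file): xas_find()
 * is normally used through the xas_for_each() iterator, here counting the
 * present entries up to @max.  The function name is hypothetical.
 */
static __maybe_unused unsigned long xas_count_example(struct xarray *xa,
		unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, max) {
		if (xas_retry(&xas, entry))
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}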

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
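
/*
 * Example (illustrative sketch, not part of the original file): finding
 * the first entry with XA_MARK_0 set, retrying if the walk races with a
 * node being freed.  This mirrors how xa_find() uses xas_find_marked();
 * the function name is hypothetical.
 */
static __maybe_unused void *xas_find_marked_example(struct xarray *xa,
		unsigned long max)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, max, XA_MARK_0);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}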

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);
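
/*
 * Example (illustrative sketch, not part of the original file): the normal
 * API needs no explicit locking from the caller; an empty slot simply
 * loads as NULL.  The function name is hypothetical.
 */
static __maybe_unused bool xa_contains_example(struct xarray *xa,
		unsigned long index)
{
	return xa_load(xa, index) != NULL;
}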

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_erase(xa, index);
	xa_unlock(xa);

	return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock(xa);

	return curr;
}
EXPORT_SYMBOL(xa_store);
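
/*
 * Example (illustrative sketch, not part of the original file): storing a
 * pointer and converting an error entry in the return value into an errno
 * with xa_err().  The function name is hypothetical.
 */
static __maybe_unused int xa_store_example(struct xarray *xa,
		unsigned long index, void *item)
{
	void *curr = xa_store(xa, index, item, GFP_KERNEL);

	return xa_err(curr);	/* 0, or -EINVAL/-ENOMEM on failure */
}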

/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
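
/*
 * Example (illustrative sketch, not part of the original file): erasing an
 * entry only if it still holds the expected value.  Storing NULL never
 * allocates, so GFP_NOWAIT keeps the lock held throughout.  The function
 * name is hypothetical; the xa_cmpxchg() wrapper in xarray.h takes the
 * lock in the same way.
 */
static __maybe_unused bool xa_erase_if_example(struct xarray *xa,
		unsigned long index, void *expected)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, expected, NULL, GFP_NOWAIT);
	xa_unlock(xa);

	return curr == expected;
}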

/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		} else {
			xas_set_err(&xas, -EBUSY);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
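
/*
 * Example (illustrative sketch, not part of the original file): claiming
 * an index for a new object; -EBUSY reports that somebody else got there
 * first.  GFP_NOWAIT avoids dropping the lock; the function name is
 * hypothetical.
 */
static __maybe_unused int xa_claim_example(struct xarray *xa,
		unsigned long index, void *item)
{
	int err;

	xa_lock(xa);
	err = __xa_insert(xa, index, item, GFP_NOWAIT);
	xa_unlock(xa);

	return err;	/* 0, -EBUSY or -ENOMEM */
}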
1496
1497#ifdef CONFIG_XARRAY_MULTI
1498static void xas_set_range(struct xa_state *xas, unsigned long first,
1499                unsigned long last)
1500{
1501        unsigned int shift = 0;
1502        unsigned long sibs = last - first;
1503        unsigned int offset = XA_CHUNK_MASK;
1504
1505        xas_set(xas, first);
1506
1507        while ((first & XA_CHUNK_MASK) == 0) {
1508                if (sibs < XA_CHUNK_MASK)
1509                        break;
1510                if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1511                        break;
1512                shift += XA_CHUNK_SHIFT;
1513                if (offset == XA_CHUNK_MASK)
1514                        offset = sibs & XA_CHUNK_MASK;
1515                sibs >>= XA_CHUNK_SHIFT;
1516                first >>= XA_CHUNK_SHIFT;
1517        }
1518
1519        offset = first & XA_CHUNK_MASK;
1520        if (offset + sibs > XA_CHUNK_MASK)
1521                sibs = XA_CHUNK_MASK - offset;
1522        if ((((first + sibs + 1) << shift) - 1) > last)
1523                sibs -= 1;
1524
1525        xas->xa_shift = shift;
1526        xas->xa_sibs = sibs;
1527}
1528
1529/**
1530 * xa_store_range() - Store this entry at a range of indices in the XArray.
1531 * @xa: XArray.
1532 * @first: First index to affect.
1533 * @last: Last index to affect.
1534 * @entry: New entry.
1535 * @gfp: Memory allocation flags.
1536 *
1537 * After this function returns, loads from any index between @first and @last,
1538 * inclusive will return @entry.
1539 * Storing into an existing multislot entry updates the entry of every index.
1540 * The marks associated with @index are unaffected unless @entry is %NULL.
1541 *
1542 * Context: Process context.  Takes and releases the xa_lock.  May sleep
1543 * if the @gfp flags permit.
1544 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1545 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1546 */
void *xa_store_range(struct xarray *xa, unsigned long first,
                unsigned long last, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, 0);

        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);
        if (last < first)
                return XA_ERROR(-EINVAL);

        do {
                xas_lock(&xas);
                if (entry) {
                        unsigned int order = BITS_PER_LONG;
                        if (last + 1)
                                order = __ffs(last + 1);
                        xas_set_order(&xas, last, order);
                        xas_create(&xas, true);
                        if (xas_error(&xas))
                                goto unlock;
                }
                do {
                        xas_set_range(&xas, first, last);
                        xas_store(&xas, entry);
                        if (xas_error(&xas))
                                goto unlock;
                        first += xas_size(&xas);
                } while (first <= last);
unlock:
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
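
/*
 * Illustrative sketch (hypothetical helper): store one object over a
 * span of indices so that a load anywhere in [start, end] returns it.
 * Only available when the kernel is built with CONFIG_XARRAY_MULTI.
 */
static int example_store_span(struct xarray *xa, unsigned long start,
                unsigned long end, void *object)
{
        /* xa_err() turns an error entry into a negative errno, else 0. */
        return xa_err(xa_store_range(xa, start, end, object, GFP_KERNEL));
}
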
#endif /* CONFIG_XARRAY_MULTI */

/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated, or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
                struct xa_limit limit, gfp_t gfp)
{
        XA_STATE(xas, xa, 0);

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;
        if (WARN_ON_ONCE(!xa_track_free(xa)))
                return -EINVAL;

        if (!entry)
                entry = XA_ZERO_ENTRY;

        do {
                xas.xa_index = limit.min;
                xas_find_marked(&xas, limit.max, XA_FREE_MARK);
                if (xas.xa_node == XAS_RESTART)
                        xas_set_err(&xas, -EBUSY);
                else
                        *id = xas.xa_index;
                xas_store(&xas, entry);
                xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));

        return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
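
/*
 * Illustrative sketch (hypothetical helper): allocate a free ID for
 * @object.  @ids is assumed to have been defined with
 * DEFINE_XARRAY_ALLOC() so that free tracking is enabled; the unlocked
 * xa_alloc() wrapper in <linux/xarray.h> performs this locking itself.
 */
static int example_assign_id(struct xarray *ids, void *object, u32 *id)
{
        int err;

        xa_lock(ids);
        /* xa_limit_32b permits any ID from 0 to UINT_MAX. */
        err = __xa_alloc(ids, id, object, xa_limit_32b, GFP_KERNEL);
        xa_unlock(ids);
        return err;
}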

/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping, 1 if it
 * succeeded after wrapping, -ENOMEM if memory could not be allocated,
 * or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
                struct xa_limit limit, u32 *next, gfp_t gfp)
{
        u32 min = limit.min;
        int ret;

        limit.min = max(min, *next);
        ret = __xa_alloc(xa, id, entry, limit, gfp);
        if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
                xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
                ret = 1;
        }

        if (ret < 0 && limit.min > min) {
                limit.min = min;
                ret = __xa_alloc(xa, id, entry, limit, gfp);
                if (ret == 0)
                        ret = 1;
        }

        if (ret >= 0) {
                *next = *id + 1;
                if (*next == 0)
                        xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
        }
        return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
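
/*
 * Illustrative sketch (hypothetical helper): cyclic allocation keeps a
 * caller-owned cursor so recently freed IDs are not reused at once.
 * A return of 1 (the search wrapped) is still a successful allocation,
 * so it is folded into 0 here.
 */
static int example_assign_cyclic_id(struct xarray *ids, void *object,
                u32 *id, u32 *next_id)
{
        int err;

        xa_lock(ids);
        err = __xa_alloc_cyclic(ids, id, object, xa_limit_32b, next_id,
                        GFP_KERNEL);
        xa_unlock(ids);
        return err < 0 ? err : 0;
}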

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry = xas_load(&xas);

        if (entry)
                xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry = xas_load(&xas);

        if (entry)
                xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry;

        rcu_read_lock();
        entry = xas_start(&xas);
        while (xas_get_mark(&xas, mark)) {
                if (!xa_is_node(entry))
                        goto found;
                entry = xas_descend(&xas, xa_to_node(entry));
        }
        rcu_read_unlock();
        return false;
 found:
        rcu_read_unlock();
        return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        xa_lock(xa);
        __xa_set_mark(xa, index, mark);
        xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        xa_lock(xa);
        __xa_clear_mark(xa, index, mark);
        xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
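
/*
 * Illustrative sketch (hypothetical helper): marks are per-entry flag
 * bits (XA_MARK_0 to XA_MARK_2) typically used to tag a subset of
 * entries, e.g. as dirty.  Setting a mark on an absent entry is a no-op,
 * so the read-back below reports whether the tag actually stuck.
 */
static bool example_tag_entry(struct xarray *xa, unsigned long index)
{
        xa_set_mark(xa, index, XA_MARK_0);
        return xa_get_mark(xa, index, XA_MARK_0);
}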

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
                        unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp);
        void *entry;

        rcu_read_lock();
        do {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find);
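
/*
 * Illustrative sketch (hypothetical helper): find the first entry
 * tagged with XA_MARK_0 at or above @start.  Passing XA_PRESENT as the
 * filter would match any non-NULL entry instead.
 */
static void *example_first_tagged(struct xarray *xa, unsigned long start)
{
        unsigned long index = start;

        return xa_find(xa, &index, ULONG_MAX, XA_MARK_0);
}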

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
                        unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;

        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
                if (xas.xa_node == XAS_BOUNDS)
                        break;
                if (xas.xa_shift) {
                        if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
                                continue;
                } else {
                        if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
                                continue;
                }
                if (!xas_retry(&xas, entry))
                        break;
        }
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find_after);
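
/*
 * Illustrative sketch (hypothetical helper): xa_find() plus
 * xa_find_after() form the iteration pattern that the xa_for_each()
 * macros in <linux/xarray.h> are built on.
 */
static unsigned int example_count_present(struct xarray *xa)
{
        unsigned long index = 0;
        unsigned int count = 0;
        void *entry;

        for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
             entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
                count++;
        return count;
}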

static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
                        unsigned long max, unsigned int n)
{
        void *entry;
        unsigned int i = 0;

        rcu_read_lock();
        xas_for_each(xas, entry, max) {
                if (xas_retry(xas, entry))
                        continue;
                dst[i++] = entry;
                if (i == n)
                        break;
        }
        rcu_read_unlock();

        return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
                        unsigned long max, unsigned int n, xa_mark_t mark)
{
        void *entry;
        unsigned int i = 0;

        rcu_read_lock();
        xas_for_each_marked(xas, entry, max, mark) {
                if (xas_retry(xas, entry))
                        continue;
                dst[i++] = entry;
                if (i == n)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
                        unsigned long max, unsigned int n, xa_mark_t filter)
{
        XA_STATE(xas, xa, start);

        if (!n)
                return 0;

        if ((__force unsigned int)filter < XA_MAX_MARKS)
                return xas_extract_marked(&xas, dst, max, n, filter);
        return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
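
/*
 * Illustrative sketch (hypothetical helper): copy up to 16 present
 * entries starting at index 0 into @buf, which the caller must size
 * for at least 16 pointers.  Returns how many were copied.
 */
static unsigned int example_snapshot(struct xarray *xa, void **buf)
{
        return xa_extract(xa, buf, 0, ULONG_MAX, 16, XA_PRESENT);
}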

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        unsigned long flags;
        void *entry;

        xas.xa_node = NULL;
        xas_lock_irqsave(&xas, flags);
        entry = xa_head_locked(xa);
        RCU_INIT_POINTER(xa->xa_head, NULL);
        xas_init_marks(&xas);
        if (xa_zero_busy(xa))
                xa_mark_clear(xa, XA_FREE_MARK);
        /* lockdep checks we're still holding the lock in xas_free_nodes() */
        if (xa_is_node(entry))
                xas_free_nodes(&xas, xa_to_node(entry));
        xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
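
/*
 * Illustrative sketch (hypothetical helper): xa_destroy() frees only
 * the tree nodes, so free the referenced objects first.  kfree() here
 * assumes the entries are kmalloc()ed pointers, not value entries.
 */
static void example_teardown(struct xarray *xa)
{
        unsigned long index;
        void *entry;

        xa_for_each(xa, index, entry)
                kfree(entry);
        xa_destroy(xa);
}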

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
        unsigned i, j;

        if (!node)
                return;
        if ((unsigned long)node & 3) {
                pr_cont("node %px\n", node);
                return;
        }

        pr_cont("node %px %s %d parent %px shift %d count %d values %d "
                "array %px list %px %px marks",
                node, node->parent ? "offset" : "max", node->offset,
                node->parent, node->shift, node->count, node->nr_values,
                node->array, node->private_list.prev, node->private_list.next);
        for (i = 0; i < XA_MAX_MARKS; i++)
                for (j = 0; j < XA_MARK_LONGS; j++)
                        pr_cont(" %lx", node->marks[i][j]);
        pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
        if (!shift)
                pr_info("%lu: ", index);
        else if (shift >= BITS_PER_LONG)
                pr_info("0-%lu: ", ~0UL);
        else
                pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
        if (!entry)
                return;

        xa_dump_index(index, shift);

        if (xa_is_node(entry)) {
                if (shift == 0) {
                        pr_cont("%px\n", entry);
                } else {
                        unsigned long i;
                        struct xa_node *node = xa_to_node(entry);
                        xa_dump_node(node);
                        for (i = 0; i < XA_CHUNK_SIZE; i++)
                                xa_dump_entry(node->slots[i],
                                      index + (i << node->shift), node->shift);
                }
        } else if (xa_is_value(entry))
                pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
                                                xa_to_value(entry), entry);
        else if (!xa_is_internal(entry))
                pr_cont("%px\n", entry);
        else if (xa_is_retry(entry))
                pr_cont("retry (%ld)\n", xa_to_internal(entry));
        else if (xa_is_sibling(entry))
                pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
        else if (xa_is_zero(entry))
                pr_cont("zero (%ld)\n", xa_to_internal(entry));
        else
                pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
        void *entry = xa->xa_head;
        unsigned int shift = 0;

        pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
                        xa->xa_flags, xa_marked(xa, XA_MARK_0),
                        xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
        if (xa_is_node(entry))
                shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
        xa_dump_entry(entry, 0, shift);
}
#endif