linux/lib/xarray.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
        return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_lock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_lock_bh(xas);
        else
                xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_unlock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_unlock_bh(xas);
        else
                xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
        if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
                xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
        if (xa->xa_flags & XA_FLAGS_MARK(mark))
                xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
        return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
                unsigned int offset, xa_mark_t mark)
{
        return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
        return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
        bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
        mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
        unsigned int mark = 0;
        unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

        if (!xas->xa_sibs)
                return;

        do {
                unsigned long *marks = xas->xa_node->marks[mark];
                if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
                        continue;
                __set_bit(xas->xa_offset, marks);
                bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
        } while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
        return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
        xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
        unsigned int shift = xas->xa_node->shift;
        xas->xa_index &= ~XA_CHUNK_MASK << shift;
        xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
        xas->xa_offset++;
        xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
        xas->xa_node = XAS_BOUNDS;
        return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
        void *entry;

        if (xas_valid(xas))
                return xas_reload(xas);
        if (xas_error(xas))
                return NULL;

        entry = xa_head(xas->xa);
        if (!xa_is_node(entry)) {
                if (xas->xa_index)
                        return set_bounds(xas);
        } else {
                if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
                        return set_bounds(xas);
        }

        xas->xa_node = NULL;
        return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
        unsigned int offset = get_offset(xas->xa_index, node);
        void *entry = xa_entry(xas->xa, node, offset);

        xas->xa_node = node;
        if (xa_is_sibling(entry)) {
                offset = xa_to_sibling(entry);
                entry = xa_entry(xas->xa, node, offset);
        }

        xas->xa_offset = offset;
        return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
        void *entry = xas_start(xas);

        while (xa_is_node(entry)) {
                struct xa_node *node = xa_to_node(entry);

                if (xas->xa_shift > node->shift)
                        break;
                entry = xas_descend(xas, node);
                if (node->shift == 0)
                        break;
        }
        return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
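
/*
 * Usage sketch (illustrative, not part of the kernel sources): a lookup
 * built on xas_load().  When walking under the RCU lock rather than the
 * xa_lock, retry entries must be handled by the caller; compare xa_load()
 * further down, which is the canonical form of this loop.
 *
 *        void *my_lookup(struct xarray *xa, unsigned long index)
 *        {
 *                XA_STATE(xas, xa, index);
 *                void *entry;
 *
 *                rcu_read_lock();
 *                do {
 *                        entry = xas_load(&xas);
 *                } while (xas_retry(&xas, entry));
 *                rcu_read_unlock();
 *                return entry;
 *        }
 */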

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE     ((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->array = XA_RCU_FREE;
        call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_alloc;

        if (!node)
                return;
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        kmem_cache_free(radix_tree_node_cachep, node);
        xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
        xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
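
/*
 * Usage sketch (illustrative): the retry pattern described above.  The
 * store is attempted with the lock held; on allocation failure, xas_nomem()
 * allocates a node with the caller's gfp flags and the loop retries.
 * my_store() is a hypothetical caller, not a kernel function.
 *
 *        int my_store(struct xarray *xa, unsigned long index, void *item)
 *        {
 *                XA_STATE(xas, xa, index);
 *
 *                do {
 *                        xas_lock(&xas);
 *                        xas_store(&xas, item);
 *                        xas_unlock(&xas);
 *                } while (xas_nomem(&xas, GFP_KERNEL));
 *                return xas_error(&xas);
 *        }
 */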

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
        __must_hold(xas->xa->xa_lock)
{
        unsigned int lock_type = xa_lock_type(xas->xa);

        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
        if (gfpflags_allow_blocking(gfp)) {
                xas_unlock_type(xas, lock_type);
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
                xas_lock_type(xas, lock_type);
        } else {
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        }
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
        if (xas->xa_update)
                xas->xa_update(node);
        else
                XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
        struct xa_node *parent = xas->xa_node;
        struct xa_node *node = xas->xa_alloc;

        if (xas_invalid(xas))
                return NULL;

        if (node) {
                xas->xa_alloc = NULL;
        } else {
                gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

                if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                        gfp |= __GFP_ACCOUNT;

                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
                if (!node) {
                        xas_set_err(xas, -ENOMEM);
                        return NULL;
                }
        }

        if (parent) {
                node->offset = xas->xa_offset;
                parent->count++;
                XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
                xas_update(xas, parent);
        }
        XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->shift = shift;
        node->count = 0;
        node->nr_values = 0;
        RCU_INIT_POINTER(node->parent, xas->xa_node);
        node->array = xas->xa;

        return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
        return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
        unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
        if (xas->xa_shift || xas->xa_sibs) {
                unsigned long mask = xas_size(xas) - 1;
                max |= mask;
                if (mask == max)
                        max++;
        }
#endif

        return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
        if (!xa_is_node(entry))
                return 0;
        return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = xas->xa_node;

        for (;;) {
                void *entry;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count != 1)
                        break;
                entry = xa_entry_locked(xa, node, 0);
                if (!entry)
                        break;
                if (!xa_is_node(entry) && node->shift)
                        break;
                if (xa_is_zero(entry) && xa_zero_busy(xa))
                        entry = NULL;
                xas->xa_node = XAS_BOUNDS;

                RCU_INIT_POINTER(xa->xa_head, entry);
                if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
                        xa_mark_clear(xa, XA_FREE_MARK);

                node->count = 0;
                node->nr_values = 0;
                if (!xa_is_node(entry))
                        RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
                xas_update(xas, node);
                xa_node_free(node);
                if (!xa_is_node(entry))
                        break;
                node = xa_to_node(entry);
                node->parent = NULL;
        }
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if @xas->xa_node
 * has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        for (;;) {
                struct xa_node *parent;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count)
                        break;

                parent = xa_parent_locked(xas->xa, node);
                xas->xa_node = parent;
                xas->xa_offset = node->offset;
                xa_node_free(node);

                if (!parent) {
                        xas->xa->xa_head = NULL;
                        xas->xa_node = XAS_BOUNDS;
                        return;
                }

                parent->slots[xas->xa_offset] = NULL;
                parent->count--;
                XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
                node = parent;
                xas_update(xas, node);
        }

        if (!node->parent)
                xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
        unsigned int offset = 0;
        struct xa_node *node = top;

        for (;;) {
                void *entry = xa_entry_locked(xas->xa, node, offset);

                if (node->shift && xa_is_node(entry)) {
                        node = xa_to_node(entry);
                        offset = 0;
                        continue;
                }
                if (entry)
                        RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
                offset++;
                while (offset == XA_CHUNK_SIZE) {
                        struct xa_node *parent;

                        parent = xa_parent_locked(xas->xa, node);
                        offset = node->offset + 1;
                        node->count = 0;
                        node->nr_values = 0;
                        xas_update(xas, node);
                        xa_node_free(node);
                        if (node == top)
                                return;
                        node = parent;
                }
        }
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = NULL;
        unsigned int shift = 0;
        unsigned long max = xas_max(xas);

        if (!head) {
                if (max == 0)
                        return 0;
                while ((max >> shift) >= XA_CHUNK_SIZE)
                        shift += XA_CHUNK_SHIFT;
                return shift + XA_CHUNK_SHIFT;
        } else if (xa_is_node(head)) {
                node = xa_to_node(head);
                shift = node->shift + XA_CHUNK_SHIFT;
        }
        xas->xa_node = NULL;

        while (max > max_index(head)) {
                xa_mark_t mark = 0;

                XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
                node = xas_alloc(xas, shift);
                if (!node)
                        return -ENOMEM;

                node->count = 1;
                if (xa_is_value(head))
                        node->nr_values = 1;
                RCU_INIT_POINTER(node->slots[0], head);

                /* Propagate the aggregated mark info to the new child */
                for (;;) {
                        if (xa_track_free(xa) && mark == XA_FREE_MARK) {
                                node_mark_all(node, XA_FREE_MARK);
                                if (!xa_marked(xa, XA_FREE_MARK)) {
                                        node_clear_mark(node, 0, XA_FREE_MARK);
                                        xa_mark_set(xa, XA_FREE_MARK);
                                }
                        } else if (xa_marked(xa, mark)) {
                                node_set_mark(node, 0, mark);
                        }
                        if (mark == XA_MARK_MAX)
                                break;
                        mark_inc(mark);
                }

                /*
                 * Now that the new node is fully initialised, we can add
                 * it to the tree
                 */
                if (xa_is_node(head)) {
                        xa_to_node(head)->offset = 0;
                        rcu_assign_pointer(xa_to_node(head)->parent, node);
                }
                head = xa_mk_node(node);
                rcu_assign_pointer(xa->xa_head, head);
                xas_update(xas, node);

                shift += XA_CHUNK_SHIFT;
        }

        xas->xa_node = node;
        return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
        struct xarray *xa = xas->xa;
        void *entry;
        void __rcu **slot;
        struct xa_node *node = xas->xa_node;
        int shift;
        unsigned int order = xas->xa_shift;

        if (xas_top(node)) {
                entry = xa_head_locked(xa);
                xas->xa_node = NULL;
                if (!entry && xa_zero_busy(xa))
                        entry = XA_ZERO_ENTRY;
                shift = xas_expand(xas, entry);
                if (shift < 0)
                        return NULL;
                if (!shift && !allow_root)
                        shift = XA_CHUNK_SHIFT;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        } else if (xas_error(xas)) {
                return NULL;
        } else if (node) {
                unsigned int offset = xas->xa_offset;

                shift = node->shift;
                entry = xa_entry_locked(xa, node, offset);
                slot = &node->slots[offset];
        } else {
                shift = 0;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        }

        while (shift > order) {
                shift -= XA_CHUNK_SHIFT;
                if (!entry) {
                        node = xas_alloc(xas, shift);
                        if (!node)
                                break;
                        if (xa_track_free(xa))
                                node_mark_all(node, XA_FREE_MARK);
                        rcu_assign_pointer(*slot, xa_mk_node(node));
                } else if (xa_is_node(entry)) {
                        node = xa_to_node(entry);
                } else {
                        break;
                }
                entry = xas_descend(xas, node);
                slot = &node->slots[xas->xa_offset];
        }

        return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users who have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
        unsigned long index = xas->xa_index;
        unsigned char shift = xas->xa_shift;
        unsigned char sibs = xas->xa_sibs;

        xas->xa_index |= ((sibs + 1UL) << shift) - 1;
        if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
                xas->xa_offset |= sibs;
        xas->xa_shift = 0;
        xas->xa_sibs = 0;

        for (;;) {
                xas_create(xas, true);
                if (xas_error(xas))
                        goto restore;
                if (xas->xa_index <= (index | XA_CHUNK_MASK))
                        goto success;
                xas->xa_index -= XA_CHUNK_SIZE;

                for (;;) {
                        struct xa_node *node = xas->xa_node;
                        if (node->shift >= shift)
                                break;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
                                break;
                }
        }

restore:
        xas->xa_shift = shift;
        xas->xa_sibs = sibs;
        xas->xa_index = index;
        return;
success:
        xas->xa_index = index;
        if (xas->xa_node)
                xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
                int count, int values)
{
        if (!node || (!count && !values))
                return;

        node->count += count;
        node->nr_values += values;
        XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
        XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
        xas_update(xas, node);
        if (count < 0)
                xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
        struct xa_node *node;
        void __rcu **slot = &xas->xa->xa_head;
        unsigned int offset, max;
        int count = 0;
        int values = 0;
        void *first, *next;
        bool value = xa_is_value(entry);

        if (entry) {
                bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
                first = xas_create(xas, allow_root);
        } else {
                first = xas_load(xas);
        }

        if (xas_invalid(xas))
                return first;
        node = xas->xa_node;
        if (node && (xas->xa_shift < node->shift))
                xas->xa_sibs = 0;
        if ((first == entry) && !xas->xa_sibs)
                return first;

        next = first;
        offset = xas->xa_offset;
        max = xas->xa_offset + xas->xa_sibs;
        if (node) {
                slot = &node->slots[offset];
                if (xas->xa_sibs)
                        xas_squash_marks(xas);
        }
        if (!entry)
                xas_init_marks(xas);

        for (;;) {
                /*
                 * Must clear the marks before setting the entry to NULL,
                 * otherwise xas_for_each_marked may find a NULL entry and
                 * stop early.  rcu_assign_pointer contains a release barrier
                 * so the mark clearing will appear to happen before the
                 * entry is set to NULL.
                 */
                rcu_assign_pointer(*slot, entry);
                if (xa_is_node(next) && (!node || node->shift))
                        xas_free_nodes(xas, xa_to_node(next));
                if (!node)
                        break;
                count += !next - !entry;
                values += !xa_is_value(first) - !value;
                if (entry) {
                        if (offset == max)
                                break;
                        if (!xa_is_sibling(entry))
                                entry = xa_mk_sibling(xas->xa_offset);
                } else {
                        if (offset == XA_CHUNK_MASK)
                                break;
                }
                next = xa_entry_locked(xas->xa, node, ++offset);
                if (!xa_is_sibling(next)) {
                        if (!entry && (offset > max))
                                break;
                        first = next;
                }
                slot++;
        }

        update_node(xas, node, count, values);
        return first;
}
EXPORT_SYMBOL_GPL(xas_store);
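
/*
 * Usage sketch (illustrative): erasing an entry with the advanced API.
 * Storing %NULL never allocates memory, so no xas_nomem() retry loop is
 * required; compare __xa_erase() further down.
 *
 *        void *my_erase(struct xarray *xa, unsigned long index)
 *        {
 *                XA_STATE(xas, xa, index);
 *                void *old;
 *
 *                xas_lock(&xas);
 *                old = xas_store(&xas, NULL);
 *                xas_unlock(&xas);
 *                return old;
 *        }
 */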

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
        if (xas_invalid(xas))
                return false;
        if (!xas->xa_node)
                return xa_marked(xas->xa, mark);
        return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (node_set_mark(node, offset, mark))
                        return;
                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (!xa_marked(xas->xa, mark))
                xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (!node_clear_mark(node, offset, mark))
                        return;
                if (node_any_mark(node, mark))
                        return;

                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (xa_marked(xas->xa, mark))
                xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);
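
/*
 * Usage sketch (illustrative): setting a mark only if an entry is present.
 * XA_MARK_0 is one of the three user-visible marks; my_tag() is a
 * hypothetical caller.
 *
 *        bool my_tag(struct xarray *xa, unsigned long index)
 *        {
 *                XA_STATE(xas, xa, index);
 *                bool tagged = false;
 *
 *                xas_lock(&xas);
 *                if (xas_load(&xas)) {
 *                        xas_set_mark(&xas, XA_MARK_0);
 *                        tagged = true;
 *                }
 *                xas_unlock(&xas);
 *                return tagged;
 *        }
 */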

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
        xa_mark_t mark = 0;

        for (;;) {
                if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
                        xas_set_mark(xas, mark);
                else
                        xas_clear_mark(xas, mark);
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        if (xas_invalid(xas))
                return;

        xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned int offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
                        if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
                if (xas->xa_index == 0)
                        xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
}
EXPORT_SYMBOL_GPL(xas_pause);
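
/*
 * Usage sketch (illustrative): yielding the lock during a long iteration.
 * process() stands in for caller-specific work on each entry.
 *
 *        XA_STATE(xas, xa, 0);
 *        void *entry;
 *
 *        xas_lock(&xas);
 *        xas_for_each(&xas, entry, ULONG_MAX) {
 *                process(entry);
 *                if (need_resched()) {
 *                        xas_pause(&xas);
 *                        xas_unlock(&xas);
 *                        cond_resched();
 *                        xas_lock(&xas);
 *                }
 *        }
 *        xas_unlock(&xas);
 */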

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index--;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset--;

        while (xas->xa_offset == 255) {
                xas->xa_offset = xas->xa_node->offset - 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index++;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset++;

        while (xas->xa_offset == XA_CHUNK_SIZE) {
                xas->xa_offset = xas->xa_node->offset + 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
        void *entry;

        if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
        if (xas->xa_index > max)
                return set_bounds(xas);

        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
        } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
        } else if (!xas->xa_node->shift &&
                    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
        }

        xas_advance(xas);

        while (xas->xa_node && (xas->xa_index <= max)) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (xa_is_node(entry)) {
                        xas->xa_node = xa_to_node(entry);
                        xas->xa_offset = 0;
                        continue;
                }
                if (entry && !xa_is_sibling(entry))
                        return entry;

                xas_advance(xas);
        }

        if (!xas->xa_node)
                xas->xa_node = XAS_BOUNDS;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
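
/*
 * Usage sketch (illustrative): xas_find() is the engine behind the
 * xas_for_each() iterator.  Counting present entries under the RCU lock;
 * retry entries must be skipped explicitly since the xa_lock is not held.
 *
 *        unsigned long my_count(struct xarray *xa, unsigned long max)
 *        {
 *                XA_STATE(xas, xa, 0);
 *                unsigned long count = 0;
 *                void *entry;
 *
 *                rcu_read_lock();
 *                xas_for_each(&xas, entry, max) {
 *                        if (xas_retry(&xas, entry))
 *                                continue;
 *                        count++;
 *                }
 *                rcu_read_unlock();
 *                return count;
 *        }
 */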

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
        bool advance = true;
        unsigned int offset;
        void *entry;

        if (xas_error(xas))
                return NULL;
        if (xas->xa_index > max)
                goto max;

        if (!xas->xa_node) {
                xas->xa_index = 1;
                goto out;
        } else if (xas_top(xas->xa_node)) {
                advance = false;
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
                        goto out;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
                        xas->xa_index = 1;
                        goto out;
                }
                xas->xa_node = xa_to_node(entry);
                xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
        }

        while (xas->xa_index <= max) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        advance = false;
                        continue;
                }

                if (!advance) {
                        entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                        if (xa_is_sibling(entry)) {
                                xas->xa_offset = xa_to_sibling(entry);
                                xas_move_index(xas, xas->xa_offset);
                        }
                }

                offset = xas_find_chunk(xas, advance, mark);
                if (offset > xas->xa_offset) {
                        advance = false;
                        xas_move_index(xas, offset);
                        /* Mind the wrap */
                        if ((xas->xa_index - 1) >= max)
                                goto max;
                        xas->xa_offset = offset;
                        if (offset == XA_CHUNK_SIZE)
                                continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;
                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }

out:
        if (xas->xa_index > max)
                goto max;
        return set_bounds(xas);
max:
        xas->xa_node = XAS_RESTART;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
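
/*
 * Usage sketch (illustrative): xas_find_marked() backs the
 * xas_for_each_marked() iterator, here counting entries with XA_MARK_0 set.
 *
 *        XA_STATE(xas, xa, 0);
 *        unsigned long count = 0;
 *        void *entry;
 *
 *        rcu_read_lock();
 *        xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
 *                if (xas_retry(&xas, entry))
 *                        continue;
 *                count++;
 *        }
 *        rcu_read_unlock();
 */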

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
        void *curr;

        if (xas_error(xas))
                return NULL;

        if (!xas->xa_node)
                return NULL;

        if (xas_top(xas->xa_node)) {
                curr = xas_start(xas);
                if (!curr)
                        return NULL;
                while (xa_is_node(curr)) {
                        struct xa_node *node = xa_to_node(curr);
                        curr = xas_descend(xas, node);
                }
                if (curr)
                        return curr;
        }

        if (xas->xa_node->shift > xas->xa_shift)
                return NULL;

        for (;;) {
                if (xas->xa_node->shift == xas->xa_shift) {
                        if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
                                break;
                } else if (xas->xa_offset == XA_CHUNK_MASK) {
                        xas->xa_offset = xas->xa_node->offset;
                        xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        continue;
                }
                curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
                if (xa_is_sibling(curr))
                        continue;
                while (xa_is_node(curr)) {
                        xas->xa_node = xa_to_node(curr);
                        xas->xa_offset = 0;
                        curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
                }
                if (curr)
                        return curr;
        }
        xas->xa_offset -= xas->xa_sibs;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        void *entry;

        rcu_read_lock();
        do {
                entry = xas_load(&xas);
                if (xa_is_zero(entry))
                        entry = NULL;
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        return entry;
}
EXPORT_SYMBOL(xa_load);
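
/*
 * Usage sketch (illustrative): a normal-API lookup.  The caller needs no
 * locking of its own; xa_load() takes the RCU read lock itself.  my_xa and
 * struct my_obj are hypothetical.
 *
 *        struct my_obj *obj = xa_load(&my_xa, index);
 *        if (!obj)
 *                return -ENOENT;
 */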

static void *xas_result(struct xa_state *xas, void *curr)
{
        if (xa_is_zero(curr))
                return NULL;
        if (xas_error(xas))
                curr = xas->xa_node;
        return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
        void *entry;

        xa_lock(xa);
        entry = __xa_erase(xa, index);
        xa_unlock(xa);

        return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);
        if (xa_track_free(xa) && !entry)
                entry = XA_ZERO_ENTRY;

        do {
                curr = xas_store(&xas, entry);
                if (xa_track_free(xa))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        void *curr;

        xa_lock(xa);
        curr = __xa_store(xa, index, entry, gfp);
        xa_unlock(xa);

        return curr;
}
EXPORT_SYMBOL(xa_store);
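
/*
 * Usage sketch (illustrative): checking a store for failure.  On error,
 * xa_store() returns an error entry, which xa_err() converts back to a
 * negative errno.
 *
 *        void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *        if (xa_is_err(old))
 *                return xa_err(old);
 */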

/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
                        void *old, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);

        do {
                curr = xas_load(&xas);
                if (curr == old) {
                        xas_store(&xas, entry);
                        if (xa_track_free(xa) && entry && !curr)
                                xas_clear_mark(&xas, XA_FREE_MARK);
                }
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
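
/*
 * Usage sketch (illustrative): store-if-absent via the unlocked wrapper
 * xa_cmpxchg().  Passing %NULL as @old means the store only happens if
 * the slot was empty; a non-NULL return means another entry was already
 * present (or an error entry on failure).
 *
 *        void *curr = xa_cmpxchg(&my_xa, index, NULL, item, GFP_KERNEL);
 *        if (xa_is_err(curr))
 *                return xa_err(curr);
 *        if (curr)
 *                return -EBUSY;
 */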

/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;
        if (!entry)
                entry = XA_ZERO_ENTRY;

        do {
                curr = xas_load(&xas);
                if (!curr) {
                        xas_store(&xas, entry);
                        if (xa_track_free(xa))
                                xas_clear_mark(&xas, XA_FREE_MARK);
                } else {
                        xas_set_err(&xas, -EBUSY);
                }
        } while (__xas_nomem(&xas, gfp));

        return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
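
/*
 * Usage sketch (illustrative): the unlocked wrapper xa_insert() refuses to
 * overwrite, returning -EBUSY when the index is already occupied.
 *
 *        int err = xa_insert(&my_xa, index, item, GFP_KERNEL);
 *        if (err)
 *                return err;
 */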

#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
                unsigned long last)
{
        unsigned int shift = 0;
        unsigned long sibs = last - first;
        unsigned int offset = XA_CHUNK_MASK;

        xas_set(xas, first);

        while ((first & XA_CHUNK_MASK) == 0) {
                if (sibs < XA_CHUNK_MASK)
                        break;
                if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
                        break;
                shift += XA_CHUNK_SHIFT;
                if (offset == XA_CHUNK_MASK)
                        offset = sibs & XA_CHUNK_MASK;
                sibs >>= XA_CHUNK_SHIFT;
                first >>= XA_CHUNK_SHIFT;
        }

        offset = first & XA_CHUNK_MASK;
        if (offset + sibs > XA_CHUNK_MASK)
                sibs = XA_CHUNK_MASK - offset;
        if ((((first + sibs + 1) << shift) - 1) > last)
                sibs -= 1;

        xas->xa_shift = shift;
        xas->xa_sibs = sibs;
}

/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with these indices are unaffected unless @entry is %NULL.
1552 *
1553 * Context: Process context.  Takes and releases the xa_lock.  May sleep
1554 * if the @gfp flags permit.
1555 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1556 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1557 */
1558void *xa_store_range(struct xarray *xa, unsigned long first,
1559                unsigned long last, void *entry, gfp_t gfp)
1560{
1561        XA_STATE(xas, xa, 0);
1562
1563        if (WARN_ON_ONCE(xa_is_internal(entry)))
1564                return XA_ERROR(-EINVAL);
1565        if (last < first)
1566                return XA_ERROR(-EINVAL);
1567
1568        do {
1569                xas_lock(&xas);
1570                if (entry) {
1571                        unsigned int order = BITS_PER_LONG;
1572                        if (last + 1)
1573                                order = __ffs(last + 1);
1574                        xas_set_order(&xas, last, order);
1575                        xas_create(&xas, true);
1576                        if (xas_error(&xas))
1577                                goto unlock;
1578                }
1579                do {
1580                        xas_set_range(&xas, first, last);
1581                        xas_store(&xas, entry);
1582                        if (xas_error(&xas))
1583                                goto unlock;
1584                        first += xas_size(&xas);
1585                } while (first <= last);
1586unlock:
1587                xas_unlock(&xas);
1588        } while (xas_nomem(&xas, gfp));
1589
1590        return xas_result(&xas, NULL);
1591}
1592EXPORT_SYMBOL(xa_store_range);
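
/*
 * Example usage (an editorial sketch): make every index in a block of
 * @nr slots resolve to the same entry.  The identifiers example_range_xa
 * and example_tag_range() are hypothetical; @nr is assumed to be >= 1.
 */
static DEFINE_XARRAY(example_range_xa);

static int __maybe_unused example_tag_range(unsigned long start,
                unsigned long nr, void *item)
{
        void *ret = xa_store_range(&example_range_xa, start, start + nr - 1,
                        item, GFP_KERNEL);

        /* xa_err() folds an XA_ERROR() return into 0 or a negative errno. */
        return xa_err(ret);
}
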
#endif /* CONFIG_XARRAY_MULTI */

/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
                struct xa_limit limit, gfp_t gfp)
{
        XA_STATE(xas, xa, 0);

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;
        if (WARN_ON_ONCE(!xa_track_free(xa)))
                return -EINVAL;

        if (!entry)
                entry = XA_ZERO_ENTRY;

        do {
                xas.xa_index = limit.min;
                xas_find_marked(&xas, limit.max, XA_FREE_MARK);
                if (xas.xa_node == XAS_RESTART)
                        xas_set_err(&xas, -EBUSY);
                else
                        *id = xas.xa_index;
                xas_store(&xas, entry);
                xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));

        return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
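
/*
 * Example usage (an editorial sketch): allocate a free index for @item
 * while already holding the xa_lock, e.g. because other state must be
 * updated under the same lock.  The identifiers example_ids and
 * example_assign_id() are hypothetical; the array must be defined with
 * DEFINE_XARRAY_ALLOC() so that free-entry tracking is enabled.
 */
static DEFINE_XARRAY_ALLOC(example_ids);

static int __maybe_unused example_assign_id(void *item, u32 *id)
{
        int err;

        xa_lock(&example_ids);
        err = __xa_alloc(&example_ids, id, item, xa_limit_31b, GFP_KERNEL);
        xa_unlock(&example_ids);
        return err;
}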

/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
                struct xa_limit limit, u32 *next, gfp_t gfp)
{
        u32 min = limit.min;
        int ret;

        limit.min = max(min, *next);
        ret = __xa_alloc(xa, id, entry, limit, gfp);
        if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
                xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
                ret = 1;
        }

        if (ret < 0 && limit.min > min) {
                limit.min = min;
                ret = __xa_alloc(xa, id, entry, limit, gfp);
                if (ret == 0)
                        ret = 1;
        }

        if (ret >= 0) {
                *next = *id + 1;
                if (*next == 0)
                        xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
        }
        return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
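
/*
 * Example usage (an editorial sketch): hand out IDs cyclically so that
 * a just-released ID is not reused immediately.  The identifiers
 * example_cyclic_ids, example_next_id and example_cyclic_id() are
 * hypothetical; a return of 1 (wrapped) is treated as success here.
 */
static DEFINE_XARRAY_ALLOC(example_cyclic_ids);
static u32 example_next_id;

static int __maybe_unused example_cyclic_id(void *item, u32 *id)
{
        int err;

        xa_lock(&example_cyclic_ids);
        err = __xa_alloc_cyclic(&example_cyclic_ids, id, item, xa_limit_31b,
                        &example_next_id, GFP_KERNEL);
        xa_unlock(&example_cyclic_ids);
        return err < 0 ? err : 0;
}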

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry = xas_load(&xas);

        if (entry)
                xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry = xas_load(&xas);

        if (entry)
                xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        XA_STATE(xas, xa, index);
        void *entry;

        rcu_read_lock();
        entry = xas_start(&xas);
        while (xas_get_mark(&xas, mark)) {
                if (!xa_is_node(entry))
                        goto found;
                entry = xas_descend(&xas, xa_to_node(entry));
        }
        rcu_read_unlock();
        return false;
 found:
        rcu_read_unlock();
        return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        xa_lock(xa);
        __xa_set_mark(xa, index, mark);
        xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
        xa_lock(xa);
        __xa_clear_mark(xa, index, mark);
        xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
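
/*
 * Example usage (an editorial sketch): use XA_MARK_0 as a "dirty" flag
 * on stored entries.  The identifiers example_marked_xa,
 * example_mark_dirty() and example_test_and_clean() are hypothetical.
 */
static DEFINE_XARRAY(example_marked_xa);

static void __maybe_unused example_mark_dirty(unsigned long index)
{
        /* Has no effect if no entry is present at @index. */
        xa_set_mark(&example_marked_xa, index, XA_MARK_0);
}

static bool __maybe_unused example_test_and_clean(unsigned long index)
{
        bool dirty = xa_get_mark(&example_marked_xa, index, XA_MARK_0);

        if (dirty)
                xa_clear_mark(&example_marked_xa, index, XA_MARK_0);
        return dirty;
}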

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
                        unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp);
        void *entry;

        rcu_read_lock();
        do {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find);
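
/*
 * Example usage (an editorial sketch): find the first present entry at
 * or above a starting index.  The identifiers example_find_xa and
 * example_first_from() are hypothetical.
 */
static DEFINE_XARRAY(example_find_xa);

static __maybe_unused void *example_first_from(unsigned long start)
{
        unsigned long index = start;

        /* %XA_PRESENT matches any entry; pass a mark to filter instead. */
        return xa_find(&example_find_xa, &index, ULONG_MAX, XA_PRESENT);
}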

static bool xas_sibling(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;
        unsigned long mask;

        if (!node)
                return false;
        mask = (XA_CHUNK_SIZE << node->shift) - 1;
        return (xas->xa_index & mask) >
                ((unsigned long)xas->xa_offset << node->shift);
}

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
                        unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;

        if (xas.xa_index == 0)
                return NULL;

        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);

                if (xas_invalid(&xas))
                        break;
                if (xas_sibling(&xas))
                        continue;
                if (!xas_retry(&xas, entry))
                        break;
        }
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find_after);
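
/*
 * Example usage (an editorial sketch): xa_find() plus xa_find_after()
 * is the building block of the xa_for_each() iterator; an open-coded
 * walk looks like this.  The identifiers example_walk_xa and
 * example_count_entries() are hypothetical.
 */
static DEFINE_XARRAY(example_walk_xa);

static unsigned int __maybe_unused example_count_entries(void)
{
        unsigned long index = 0;
        unsigned int count = 0;
        void *entry;

        for (entry = xa_find(&example_walk_xa, &index, ULONG_MAX, XA_PRESENT);
             entry;
             entry = xa_find_after(&example_walk_xa, &index, ULONG_MAX,
                                XA_PRESENT))
                count++;
        return count;
}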

static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
                        unsigned long max, unsigned int n)
{
        void *entry;
        unsigned int i = 0;

        rcu_read_lock();
        xas_for_each(xas, entry, max) {
                if (xas_retry(xas, entry))
                        continue;
                dst[i++] = entry;
                if (i == n)
                        break;
        }
        rcu_read_unlock();

        return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
                        unsigned long max, unsigned int n, xa_mark_t mark)
{
        void *entry;
        unsigned int i = 0;

        rcu_read_lock();
        xas_for_each_marked(xas, entry, max, mark) {
                if (xas_retry(xas, entry))
                        continue;
                dst[i++] = entry;
                if (i == n)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
                        unsigned long max, unsigned int n, xa_mark_t filter)
{
        XA_STATE(xas, xa, start);

        if (!n)
                return 0;

        if ((__force unsigned int)filter < XA_MAX_MARKS)
                return xas_extract_marked(&xas, dst, max, n, filter);
        return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
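
/*
 * Example usage (an editorial sketch): copy up to 16 present entries
 * into an ordinary array.  The identifiers example_extract_xa and
 * example_snapshot() are hypothetical; hold the xa_lock across the call
 * if a consistent snapshot is required.
 */
static DEFINE_XARRAY(example_extract_xa);

static unsigned int __maybe_unused example_snapshot(void *buf[16])
{
        return xa_extract(&example_extract_xa, buf, 0, ULONG_MAX, 16,
                        XA_PRESENT);
}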

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        unsigned long flags;
        void *entry;

        xas.xa_node = NULL;
        xas_lock_irqsave(&xas, flags);
        entry = xa_head_locked(xa);
        RCU_INIT_POINTER(xa->xa_head, NULL);
        xas_init_marks(&xas);
        if (xa_zero_busy(xa))
                xa_mark_clear(xa, XA_FREE_MARK);
        /* lockdep checks we're still holding the lock in xas_free_nodes() */
        if (xa_is_node(entry))
                xas_free_nodes(&xas, xa_to_node(entry));
        xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
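
/*
 * Example usage (an editorial sketch): tear down an xarray whose entries
 * point to kmalloc()ed objects.  xa_destroy() frees only the tree, so
 * the entries themselves must be freed first.  The identifiers
 * example_objects and example_teardown() are hypothetical.
 */
static DEFINE_XARRAY(example_objects);

static void __maybe_unused example_teardown(void)
{
        unsigned long index;
        void *entry;

        xa_for_each(&example_objects, index, entry)
                kfree(entry);
        xa_destroy(&example_objects);
}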

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
        unsigned i, j;

        if (!node)
                return;
        if ((unsigned long)node & 3) {
                pr_cont("node %px\n", node);
                return;
        }

        pr_cont("node %px %s %d parent %px shift %d count %d values %d "
                "array %px list %px %px marks",
                node, node->parent ? "offset" : "max", node->offset,
                node->parent, node->shift, node->count, node->nr_values,
                node->array, node->private_list.prev, node->private_list.next);
        for (i = 0; i < XA_MAX_MARKS; i++)
                for (j = 0; j < XA_MARK_LONGS; j++)
                        pr_cont(" %lx", node->marks[i][j]);
        pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
        if (!shift)
                pr_info("%lu: ", index);
        else if (shift >= BITS_PER_LONG)
                pr_info("0-%lu: ", ~0UL);
        else
                pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
        if (!entry)
                return;

        xa_dump_index(index, shift);

        if (xa_is_node(entry)) {
                if (shift == 0) {
                        pr_cont("%px\n", entry);
                } else {
                        unsigned long i;
                        struct xa_node *node = xa_to_node(entry);
                        xa_dump_node(node);
                        for (i = 0; i < XA_CHUNK_SIZE; i++)
                                xa_dump_entry(node->slots[i],
                                      index + (i << node->shift), node->shift);
                }
        } else if (xa_is_value(entry))
                pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
                                                xa_to_value(entry), entry);
        else if (!xa_is_internal(entry))
                pr_cont("%px\n", entry);
        else if (xa_is_retry(entry))
                pr_cont("retry (%ld)\n", xa_to_internal(entry));
        else if (xa_is_sibling(entry))
                pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
        else if (xa_is_zero(entry))
                pr_cont("zero (%ld)\n", xa_to_internal(entry));
        else
                pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
        void *entry = xa->xa_head;
        unsigned int shift = 0;

        pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
                        xa->xa_flags, xa_marked(xa, XA_MARK_0),
                        xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
        if (xa_is_node(entry))
                shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
        xa_dump_entry(entry, 0, shift);
}
#endif