linux/include/linux/radix-tree.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>

/* Keep unconverted code working */
#define radix_tree_root         xarray
#define radix_tree_node         xa_node

struct radix_tree_preload {
        local_lock_t lock;
        unsigned nr;
        /* nodes->parent points to next preallocated node */
        struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK           3UL
#define RADIX_TREE_INTERNAL_NODE        2UL

static inline bool radix_tree_is_internal_node(void *ptr)
{
        return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
                                RADIX_TREE_INTERNAL_NODE;
}
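
/*
 * Illustrative sketch (not part of the API): how the two low bits select
 * the kind of entry.  The pointer values below are hypothetical.
 *
 *        radix_tree_is_internal_node((void *)0x1000);   returns false (00: data)
 *        radix_tree_is_internal_node((void *)0x1002);   returns true  (10: internal)
 *        radix_tree_is_internal_node((void *)0x1001);   returns false (x1: value entry)
 */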

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT    XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE     (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK     (RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS     XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS    XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
                                          RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR     ((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT  (__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)     XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
        struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
        return root->xa_head == NULL;
}
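
/*
 * Usage sketch (names hypothetical): a tree may be defined statically or
 * initialised at runtime, passing the GFP flags used for node allocation.
 *
 *        static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *        struct foo {
 *                struct radix_tree_root entries;
 *        };
 *        struct foo *f;
 *        INIT_RADIX_TREE(&f->entries, GFP_ATOMIC);
 *
 *        if (radix_tree_empty(&my_tree))
 *                ...;
 */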

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:      index of current slot
 * @next_index: one beyond the last index for this chunk
 * @tags:       bit-mask for tag-iterating
 * @node:       node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
        unsigned long   index;
        unsigned long   next_index;
        unsigned long   tags;
        struct radix_tree_node *node;
};

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items stored in the tree, and
 * management of their lifetimes, are entirely the responsibility of API
 * users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions can be called locklessly, using RCU.  The caller must
 * ensure calls to these functions are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result, such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged() can be called without locking or RCU.
 */
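
/*
 * A minimal lockless-lookup sketch (names hypothetical).  The item's own
 * lifetime rules still apply, as described above:
 *
 *        rcu_read_lock();
 *        item = radix_tree_lookup(&my_tree, index);
 *        if (item)
 *                ... use item under its own locking or RCU lifetime rules ...
 *        rcu_read_unlock();
 */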

/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold the tree at least
 * read locked across the slot lookup and dereference.  Not required if the
 * write lock is held (i.e. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
        return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
                                                        spinlock_t *treelock)
{
        return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry       - check radix_tree_deref_slot
 * @arg:        pointer returned by radix_tree_deref_slot
 * Returns:     0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
        return unlikely(radix_tree_is_internal_node(arg));
}
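
/*
 * Sketch of a lockless slot dereference with restart (names hypothetical).
 * Under RCU only, a concurrently moved entry makes radix_tree_deref_retry()
 * return true, in which case the lookup must be restarted:
 *
 *        rcu_read_lock();
 * repeat:
 *        slot = radix_tree_lookup_slot(&my_tree, index);
 *        item = slot ? radix_tree_deref_slot(slot) : NULL;
 *        if (radix_tree_deref_retry(item))
 *                goto repeat;
 *        rcu_read_unlock();
 */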

/**
 * radix_tree_exception - did radix_tree_deref_slot return an exceptional entry?
 * @arg:        value returned by radix_tree_deref_slot
 * Returns:     0 if well-aligned pointer, non-0 if either kind of exception
 *              (internal entry or value entry).
 */
static inline int radix_tree_exception(void *arg)
{
        return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int radix_tree_insert(struct radix_tree_root *, unsigned long index,
                        void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
                          struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
                                        unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
                          void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
                const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
                             void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
                        struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
                        void **results, unsigned long first_index,
                        unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
                        unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
                        unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
                        unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
                const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
                void **results, unsigned long first_index,
                unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
                void __rcu ***results, unsigned long first_index,
                unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
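
/*
 * Tag usage sketch ("MY_TAG" is a hypothetical tag index below
 * RADIX_TREE_MAX_TAGS; tag modifications require the caller's exclusion,
 * per the synchronization rules above):
 *
 *        radix_tree_tag_set(&my_tree, index, MY_TAG);
 *        if (radix_tree_tag_get(&my_tree, index, MY_TAG))
 *                ...;
 *        radix_tree_tag_clear(&my_tree, index, MY_TAG);
 */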

static inline void radix_tree_preload_end(void)
{
        local_unlock(&radix_tree_preloads.lock);
}
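
/*
 * Canonical preload sketch ("tree_lock" is hypothetical): preallocate nodes
 * with a sleeping allocation, then insert under a spinlock.  On success,
 * radix_tree_preload() leaves the per-CPU preload locked, so
 * radix_tree_preload_end() must be called on every subsequent path:
 *
 *        error = radix_tree_preload(GFP_KERNEL);
 *        if (error)
 *                return error;
 *        spin_lock(&tree_lock);
 *        error = radix_tree_insert(&my_tree, index, item);
 *        spin_unlock(&tree_lock);
 *        radix_tree_preload_end();
 */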

void __rcu **idr_get_free(struct radix_tree_root *root,
                              struct radix_tree_iter *iter, gfp_t gfp,
                              unsigned long max);

enum {
        RADIX_TREE_ITER_TAG_MASK = 0x0f,        /* tag index in lower nybble */
        RADIX_TREE_ITER_TAGGED   = 0x10,        /* lookup tagged slots */
        RADIX_TREE_ITER_CONTIG   = 0x20,        /* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:       pointer to iterator state
 * @start:      iteration starting index
 * Returns:     NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
        /*
         * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
         * in the case of a successful tagged chunk lookup.  If the lookup was
         * unsuccessful or non-tagged then nobody cares about ->tags.
         *
         * Set index to zero to bypass next_index overflow protection.
         * See the comment in radix_tree_next_chunk() for details.
         */
        iter->index = 0;
        iter->next_index = start;
        return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:       radix tree root
 * @iter:       iterator state
 * @flags:      RADIX_TREE_ITER_* flags and tag index
 * Returns:     pointer to chunk first slot, or NULL if there are none left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
                             struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
                        struct radix_tree_iter *iter, unsigned long index)
{
        radix_tree_iter_init(iter, index);
        return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
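
/*
 * Sketch (names hypothetical): a single-index lookup that also fills @iter,
 * e.g. to feed a subsequent radix_tree_iter_delete():
 *
 *        slot = radix_tree_iter_lookup(&my_tree, &iter, index);
 *        if (slot)
 *                radix_tree_iter_delete(&my_tree, &iter, slot);
 */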

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:       iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
        iter->next_index = iter->index;
        iter->tags = 0;
        return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
        return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
                                        struct radix_tree_iter *iter);
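
/*
 * Sketch: dropping the tree lock mid-iteration ("tree_lock" hypothetical).
 * Calling radix_tree_iter_resume() first lets the loop continue safely
 * from the next index once the lock is retaken:
 *
 *        radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *                ... process slot ...
 *                if (need_resched()) {
 *                        slot = radix_tree_iter_resume(slot, &iter);
 *                        spin_unlock(&tree_lock);
 *                        cond_resched();
 *                        spin_lock(&tree_lock);
 *                }
 *        }
 */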

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:       pointer to radix tree iterator
 * Returns:     current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
        return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:       pointer to current slot
 * @iter:       pointer to iterator state
 * @flags:      RADIX_TREE_ITER_*, should be constant
 * Returns:     pointer to next slot, or NULL if there are none left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
                                struct radix_tree_iter *iter, unsigned flags)
{
        if (flags & RADIX_TREE_ITER_TAGGED) {
                iter->tags >>= 1;
                if (unlikely(!iter->tags))
                        return NULL;
                if (likely(iter->tags & 1ul)) {
                        iter->index = __radix_tree_iter_add(iter, 1);
                        slot++;
                        goto found;
                }
                if (!(flags & RADIX_TREE_ITER_CONTIG)) {
                        unsigned offset = __ffs(iter->tags);

                        iter->tags >>= offset++;
                        iter->index = __radix_tree_iter_add(iter, offset);
                        slot += offset;
                        goto found;
                }
        } else {
                long count = radix_tree_chunk_size(iter);

                while (--count > 0) {
                        slot++;
                        iter->index = __radix_tree_iter_add(iter, 1);

                        if (likely(*slot))
                                goto found;
                        if (flags & RADIX_TREE_ITER_CONTIG) {
                                /* forbid switching to the next chunk */
                                iter->next_index = 0;
                                break;
                        }
                }
        }
        return NULL;

 found:
        return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:       the void** variable for pointer to slot
 * @root:       the struct radix_tree_root pointer
 * @iter:       the struct radix_tree_iter pointer
 * @start:      iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)               \
        for (slot = radix_tree_iter_init(iter, start) ;                 \
             slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;    \
             slot = radix_tree_next_slot(slot, iter, 0))
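
/*
 * Iteration sketch (names hypothetical): visit every non-empty slot,
 * handling a concurrently moved entry with radix_tree_iter_retry() when
 * the tree is protected only by RCU:
 *
 *        void __rcu **slot;
 *        struct radix_tree_iter iter;
 *
 *        rcu_read_lock();
 *        radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *                void *item = radix_tree_deref_slot(slot);
 *
 *                if (radix_tree_deref_retry(item)) {
 *                        slot = radix_tree_iter_retry(&iter);
 *                        continue;
 *                }
 *                ... use item ...
 *        }
 *        rcu_read_unlock();
 */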

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:       the void** variable for pointer to slot
 * @root:       the struct radix_tree_root pointer
 * @iter:       the struct radix_tree_iter pointer
 * @start:      iteration starting index
 * @tag:        tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)        \
        for (slot = radix_tree_iter_init(iter, start) ;                 \
             slot || (slot = radix_tree_next_chunk(root, iter,          \
                              RADIX_TREE_ITER_TAGGED | tag)) ;          \
             slot = radix_tree_next_slot(slot, iter,                    \
                                RADIX_TREE_ITER_TAGGED | tag))
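
/*
 * Tagged-iteration sketch ("MY_TAG" hypothetical): only slots with the
 * given tag set are visited:
 *
 *        radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *                ... process tagged slot ...
 *        }
 */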

#endif /* _LINUX_RADIX_TREE_H */