linux/include/linux/radix-tree.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	2UL

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}
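
/*
 * Example (an illustrative sketch, not part of the API): how the low-bit
 * encoding above classifies entries.  "item" stands for a hypothetical
 * kmalloc'd pointer; such pointers are word-aligned, so both low bits are
 * clear and they read as data pointers.  xa_mk_value() sets bit 0,
 * producing a value entry; only the 10 bit pattern is an internal entry.
 *
 *	radix_tree_is_internal_node(item);		// false: data pointer
 *	radix_tree_is_internal_node(xa_mk_value(42));	// false: value entry
 */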

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS	XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS	XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)	XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->xa_head == NULL;
}
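
/*
 * Example (an illustrative sketch; "my_tree" is a hypothetical name):
 * defining a tree statically, or initialising one embedded in another
 * structure at run time.
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *	struct radix_tree_root tree;
 *	INIT_RADIX_TREE(&tree, GFP_KERNEL);
 *	WARN_ON(!radix_tree_empty(&tree));
 */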

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
};

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, must be completely handled by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
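
/*
 * Example (an illustrative sketch of the lockless pattern described
 * above; "tree", "index" and "use" are hypothetical): items are assumed
 * to be freed via RCU, so the returned pointer stays valid for the
 * duration of the read-side critical section.
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&tree, index);
 *	if (item)
 *		use(item);
 *	rcu_read_unlock();
 */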

/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if no retry is required, non-zero if the lookup must be
 *		retried
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
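
/*
 * Example (an illustrative sketch): pairing radix_tree_deref_slot() with
 * radix_tree_deref_retry() under the RCU read lock.  A concurrent node
 * relocation leaves a "moved" internal entry in the slot; deref_retry()
 * detects it and the lookup must be restarted.
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	rcu_read_unlock();
 */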

/**
 * radix_tree_exception - did radix_tree_deref_slot return an exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 for a well-aligned data pointer, non-0 for either kind of
 *		exceptional entry (internal entry or value entry).
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int radix_tree_insert(struct radix_tree_root *, unsigned long index,
			void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
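
/*
 * Example (an illustrative sketch; "tree_lock" is a hypothetical lock):
 * the preload pattern.  radix_tree_preload() fills a per-CPU pool of
 * nodes with a sleeping allocation before the spinlock is taken, so the
 * insertion under the lock cannot fail for lack of memory.  Preemption
 * stays disabled until radix_tree_preload_end().
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (error)
 *		return error;
 *	spin_lock_irq(&tree_lock);
 *	error = radix_tree_insert(&tree, index, item);
 *	spin_unlock_irq(&tree_lock);
 *	radix_tree_preload_end();
 */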

void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and constructs a bit mask for tagged
 * iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_find - find a present entry
 * @root: radix tree root
 * @iter: iterator state
 * @index: start location
 *
 * This function returns the slot containing the entry with the lowest index
 * which is at least @index.  If @index is larger than any present entry, this
 * function returns NULL.  The @iter is updated to describe the entry found.
 */
static inline void __rcu **
radix_tree_iter_find(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, 0);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
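
/*
 * Example (an illustrative sketch): handling a racing deletion during an
 * RCU-only walk with radix_tree_for_each_slot() (defined below).  On a
 * dead slot, restart the current chunk rather than the whole iteration.
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		// use item under RCU rules
 *	}
 *	rcu_read_unlock();
 */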

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);
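
/*
 * Example (an illustrative sketch; "tree_lock" and "process" are
 * hypothetical): dropping the tree lock periodically during a long
 * locked iteration.  radix_tree_iter_resume() is called before the lock
 * is released, so the next radix_tree_next_slot() fetches a fresh chunk.
 *
 *	spin_lock_irq(&tree_lock);
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
 *		process(radix_tree_deref_slot_protected(slot, &tree_lock));
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock_irq(&tree_lock);
 *			cond_resched();
 *			spin_lock_irq(&tree_lock);
 *		}
 *	}
 *	spin_unlock_irq(&tree_lock);
 */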

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also consumes bits of @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
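
/*
 * Example (an illustrative sketch): visiting every present entry.  Under
 * the tree lock, dereference slots with radix_tree_deref_slot_protected();
 * under RCU only, combine with radix_tree_deref_retry() as shown above.
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0)
 *		pr_info("entry at index %lu\n", iter.index);
 */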

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))
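
/*
 * Example (an illustrative sketch): visiting only entries that have a
 * given tag set, here tag 0 starting from index 0.  The same iterator
 * state is used as for the untagged walk above.
 *
 *	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0)
 *		pr_info("tagged entry at index %lu\n", iter.index);
 */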

#endif /* _LINUX_RADIX_TREE_H */