/* linux/include/linux/rculist.h */
   1#ifndef _LINUX_RCULIST_H
   2#define _LINUX_RCULIST_H
   3
   4#ifdef __KERNEL__
   5
   6/*
   7 * RCU-protected list version
   8 */
   9#include <linux/list.h>
  10#include <linux/rcupdate.h>
  11
  12/*
  13 * Why is there no list_empty_rcu()?  Because list_empty() serves this
  14 * purpose.  The list_empty() function fetches the RCU-protected pointer
  15 * and compares it to the address of the list head, but neither dereferences
  16 * this pointer itself nor provides this pointer to the caller.  Therefore,
  17 * it is not necessary to use rcu_dereference(), so that list_empty() can
  18 * be used anywhere you would want to use a list_empty_rcu().
  19 */
  20
/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
        /*
         * WRITE_ONCE() forbids the compiler from tearing or fusing these
         * stores, which concurrent RCU readers could otherwise observe.
         */
        WRITE_ONCE(list->next, list);
        WRITE_ONCE(list->prev, list);
}
  35
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly.  The __rcu cast lets sparse
 * verify that the pointer is only used via the RCU accessors.
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))
  41
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next)
{
        if (!__list_add_valid(new, prev, next))
                return;

        new->next = next;
        new->prev = prev;
        /*
         * Publish @new only after its ->next/->prev are initialized, so a
         * concurrent reader never sees a half-initialized entry.  ->prev
         * of @next is fixed up last: RCU readers may not traverse ->prev.
         */
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
}
  59
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}
  80
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}
 102
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        /* ->next stays intact for readers still traversing; only ->prev,
         * which readers may not follow, can be poisoned. */
        entry->prev = LIST_POISON2;
}
 132
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                n->pprev = NULL;
        }
}
 160
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        /*
         * The publish below atomically switches readers arriving through
         * the predecessor from @old to the fully-initialized @new.
         */
        rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
}
 178
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:       the RCU-protected list to splice
 * @prev:       points to the last element of the existing list
 * @next:       points to the first element of the existing list
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
                                          struct list_head *prev,
                                          struct list_head *next,
                                          void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        /*
         * "first" and "last" tracking list, so initialize it.  RCU readers
         * have access to this list, so we must use INIT_LIST_HEAD_RCU()
         * instead of INIT_LIST_HEAD().
         */

        INIT_LIST_HEAD_RCU(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list.  Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers.  Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = next;
        /* Publish the spliced-in segment only after last->next is set. */
        rcu_assign_pointer(list_next_rcu(prev), first);
        first->prev = prev;
        next->prev = last;
}
 236
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list:       the RCU-protected list to splice
 * @head:       the place in the existing list to splice the first list into
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head, head->next, sync);
}
 251
/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list:       the RCU-protected list to splice
 * @head:       the place in the existing list to splice the first list into
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
                                             struct list_head *head,
                                             void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head->prev, head, sync);
}
 266
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
        container_of(lockless_dereference(ptr), type, member)
 278
 279/**
 280 * Where are list_empty_rcu() and list_first_entry_rcu()?
 281 *
 282 * Implementing those functions following their counterparts list_empty() and
 283 * list_first_entry() is not advisable because they lead to subtle race
 284 * conditions as the following snippet shows:
 285 *
 286 * if (!list_empty_rcu(mylist)) {
 287 *      struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 288 *      do_something(bar);
 289 * }
 290 *
 291 * The list may not be empty when list_empty_rcu checks it, but it may be when
 292 * list_first_entry_rcu rereads the ->next pointer.
 293 *
 294 * Rereading the ->next pointer is not a problem for list_empty() and
 295 * list_first_entry() because they would be protected by a lock that blocks
 296 * writers.
 297 *
 298 * See list_first_or_null_rcu for an alternative.
 299 */
 300
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
        struct list_head *__ptr = (ptr); \
        /* Snapshot ->next once so the emptiness test and the returned \
         * entry are based on the same pointer value. */ \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
 318
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:       the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
        struct list_head *__head = (head); \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__next != __head) ? list_entry_rcu(__next, type, \
                                                  member) : NULL; \
})
 339
/**
 * list_for_each_entry_rcu      -       iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
                &pos->member != (head); \
                pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 354
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
        container_of((typeof(ptr))lockless_dereference(ptr), type, member)
 371
/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
        for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
 390
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)             \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);    \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 404
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        /* ->next is left intact for concurrent readers; only ->pprev,
         * which readers never follow, is poisoned. */
        n->pprev = LIST_POISON2;
}
 429
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        /*
         * *new->pprev is the predecessor's ->next (or the head's ->first);
         * storing @new there atomically redirects readers from @old.
         */
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
}
 449
/*
 * return the first or the next element in an RCU protected hlist.
 * The __rcu casts let sparse check that these lvalues are only
 * accessed through the RCU pointer-publish/dereference accessors.
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))
 456
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        /* Publish @n only after its ->next/->pprev are initialized. */
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                first->pprev = &n->next;
}
 487
/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *i, *last = NULL;

        /*
         * Writer-side scan to find the tail; safe because the caller
         * excludes other updaters.  NOTE(review): assigning these __rcu
         * lvalues to plain pointers presumably triggers sparse
         * address-space warnings — confirm against sparse output.
         */
        for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
                last = i;

        if (last) {
                n->next = last->next;
                n->pprev = &last->next;
                rcu_assign_pointer(hlist_next_rcu(last), n);
        } else {
                hlist_add_head_rcu(n, h);
        }
}
 523
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        /*
         * n->pprev was just copied from next->pprev, so hlist_pprev_rcu(n)
         * is the predecessor's ->next (or the head's ->first): this store
         * publishes @n in front of @next.
         */
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
        next->pprev = &n->next;
}
 550
/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
                                        struct hlist_node *prev)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        /* Publish @n only after its ->next/->pprev are initialized. */
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                n->next->pprev = &n->next;
}
 578
/* Iterate over the raw hlist_node chain under RCU protection. */
#define __hlist_for_each_rcu(pos, head)                         \
        for (pos = rcu_dereference(hlist_first_rcu(head));      \
             pos;                                               \
             pos = rcu_dereference(hlist_next_rcu(pos)))
 583
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)                     \
        for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
 600
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)                     \
        for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
 620
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
 637
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))
 649
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)               \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member))
 661
/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)                      \
        for (; pos;                                                     \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))
 671
 672#endif  /* __KERNEL__ */
 673#endif
 674