/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()? Because list_empty() serves this
 * purpose. The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller. Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))

/*
 * Check during list traversal that we are within an RCU reader
 */

#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
/*
 * Note: @cond must be fully parenthesized when expanded -- it may be an
 * arbitrary boolean expression (e.g. "a || b") supplied by the caller.
 */
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})

#define __list_check_srcu(cond)						\
	({								\
	RCU_LOCKDEP_WARN(!(cond),					\
		"RCU-list traversed without holding the required lock!");\
	})
#else
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	entry->prev = LIST_POISON2;
}

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @prev:	points to the last element of the existing list
 * @next:	points to the first element of the existing list
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" tracking list, so initialize it. RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *			  designed for stacks.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *			       list, designed for queues.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)

/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu checks it, but it may be when
 * list_first_entry_rcu rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:	the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_srcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked. Another example is when items are added to the list,
 * but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)			\
	for (; &(pos)->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}

/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		n->next = last->next;
		n->pprev = &last->next;
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		hlist_add_head_rcu(n, h);
	}
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}

#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif