#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()? Because list_empty() serves this
 * purpose. The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller. Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	/*
	 * rcu_assign_pointer() orders the initialization of @new above
	 * before the store that makes it reachable from @prev, so a
	 * concurrent RCU reader never sees a half-initialized entry.
	 */
	rcu_assign_pointer(list_next_rcu(prev), new);
	/* ->prev is not RCU-protected; readers must not traverse it. */
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison only ->prev: readers may still be walking ->next. */
	entry->prev = LIST_POISON2;
}

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	/*
	 * Publish @new in place of @old: the assignment below is the
	 * single atomic switch-over point for RCU readers.
	 */
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* Only ->prev is poisoned: readers may still traverse old->next. */
	old->prev = LIST_POISON2;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head. In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created. But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	/* Snapshot the source list's endpoints before re-initializing it. */
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	/* Nothing to splice; first/last above were never needed. */
	if (list_empty(list))
		return;

	/* "first" and "last" tracking list, so initialize it. */

	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = at;
	/* Publication point: readers of @head now see the spliced chain. */
	rcu_assign_pointer(list_next_rcu(head), first);
	first->prev = head;
	at->prev = last;
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
	 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
	})

/**
 * list_first_entry_rcu - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_entry_rcu(ptr, type, member) \
	list_entry_rcu((ptr)->next, type, member)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))


/**
 * list_for_each_continue_rcu
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
		(pos) != (head); \
		(pos) = rcu_dereference_raw(list_next_rcu(pos)))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison only ->pprev: readers may still be walking ->next. */
	n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	/*
	 * Publication point: the predecessor's forward pointer (which
	 * *old->pprev designates) is switched atomically from @old to @new.
	 */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	/*
	 * Publication point: n->next/n->pprev are initialized before @n
	 * becomes reachable from the list head, so racing readers never
	 * see a half-initialized node.
	 */
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	/* Publish @n via its predecessor's forward pointer (*n->pprev). */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
413 * However, it is perfectly legal to run concurrently with 414 * the _rcu list-traversal primitives, such as 415 * hlist_for_each_entry_rcu(), used to prevent memory-consistency 416 * problems on Alpha CPUs. 417 */ 418static inline void hlist_add_after_rcu(struct hlist_node *prev, 419 struct hlist_node *n) 420{ 421 n->next = prev->next; 422 n->pprev = &prev->next; 423 rcu_assign_pointer(hlist_next_rcu(prev), n); 424 if (n->next) 425 n->next->pprev = &n->next; 426} 427 428#define __hlist_for_each_rcu(pos, head) \ 429 for (pos = rcu_dereference(hlist_first_rcu(head)); \ 430 pos; \ 431 pos = rcu_dereference(hlist_next_rcu(pos))) 432 433/** 434 * hlist_for_each_entry_rcu - iterate over rcu list of given type 435 * @tpos: the type * to use as a loop cursor. 436 * @pos: the &struct hlist_node to use as a loop cursor. 437 * @head: the head for your list. 438 * @member: the name of the hlist_node within the struct. 439 * 440 * This list-traversal primitive may safely run concurrently with 441 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 442 * as long as the traversal is guarded by rcu_read_lock(). 443 */ 444#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ 445 for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ 446 pos && \ 447 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 448 pos = rcu_dereference_raw(hlist_next_rcu(pos))) 449 450/** 451 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type 452 * @tpos: the type * to use as a loop cursor. 453 * @pos: the &struct hlist_node to use as a loop cursor. 454 * @head: the head for your list. 455 * @member: the name of the hlist_node within the struct. 456 * 457 * This list-traversal primitive may safely run concurrently with 458 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 459 * as long as the traversal is guarded by rcu_read_lock(). 
460 */ 461#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ 462 for (pos = rcu_dereference_bh((head)->first); \ 463 pos && \ 464 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 465 pos = rcu_dereference_bh(pos->next)) 466 467/** 468 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point 469 * @tpos: the type * to use as a loop cursor. 470 * @pos: the &struct hlist_node to use as a loop cursor. 471 * @member: the name of the hlist_node within the struct. 472 */ 473#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ 474 for (pos = rcu_dereference((pos)->next); \ 475 pos && \ 476 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 477 pos = rcu_dereference(pos->next)) 478 479/** 480 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 481 * @tpos: the type * to use as a loop cursor. 482 * @pos: the &struct hlist_node to use as a loop cursor. 483 * @member: the name of the hlist_node within the struct. 484 */ 485#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ 486 for (pos = rcu_dereference_bh((pos)->next); \ 487 pos && \ 488 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 489 pos = rcu_dereference_bh(pos->next)) 490 491 492#endif /* __KERNEL__ */ 493#endif 494