/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Tue Jun  9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Thu Jan  4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function
 *     which I currently don't know which licence or copyright it
 *     has. Please inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

/*
 * NOTE :
 * There are various problems with this package :
 *	o the hash function for ints is pathetic (but could be changed)
 *	o locking is sometimes suspicious (especially during enumeration)
 *	o most users have only a few elements (== overhead)
 *	o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *	o not 64 bit compliant (most users do hashv = (int) self)
 *	o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list.
 * Jean II
 */

/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *	Hashbins are very often in the IrDA stack a global repository of
 * information, and therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *	Therefore, very often it is highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee the
 * consistency of the operations on it.
 *
 *	First, we need to define the objective of locking :
 *		1) Protect user data (content pointed to by the hashbin)
 *		2) Protect the hashbin structure itself (linked list in each bin)
 *
 *			   OLD LOCKING
 *			   -----------
 *
 *	The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* aspects.
 *		o HB_GLOBAL used a spinlock for each bin (local locking).
 *		o HB_LOCAL disabled irqs on *all* CPUs, so it used a single
 *		  global semaphore.
 *	The problems were :
 *		A) Global irq disabling is no longer supported by the kernel
 *		B) No protection for the hashbin struct global data
 *			o hashbin_delete()
 *			o hb_current
 *		C) No protection for user data in some cases
 *
 *	A) HB_LOCAL uses global irq disabling, so it doesn't work on
 * kernel 2.5.X.
 *    Even when it is supported (kernel 2.4.X and earlier), its
 * performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *	B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so it can't do (B).
 *	C) Some functions return a pointer to data that is still in the
 * hashbin :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 * As the data is still in the hashbin, it may be changed or free'd
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must include the use of the data
 * within the caller.
 * The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so it doesn't know which spinlock to use.
 *
 *	Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *			   NEW LOCKING
 *			   -----------
 *
 *	To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *		1) New HB_LOCK scheme
 *		2) hashbin->hb_spinlock
 *		3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 *	HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *	HB_LOCK can only protect some of the hashbin calls, so it only
 * locks calls that can be made 100% safe and leaves other calls
 * unprotected.
 *	HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 *	The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 *	If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 *	Locking is done like this :
 *		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	Releasing the lock :
 *		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 *	The following calls are safe or protected via HB_LOCK :
 *		o hashbin_new()		-> safe
 *		o hashbin_delete()
 *		o hashbin_insert()
 *		o hashbin_remove_first()
 *		o hashbin_remove()
 *		o hashbin_remove_this()
 *		o HASHBIN_GET_SIZE()	-> atomic
 *
 *	The following calls only protect the hashbin itself :
 *		o hashbin_lock_find()
 *		o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 *	The following calls need to be protected by the caller :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 *	If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 *	If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *	In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region (see the sketch just below).
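 *
 *	For example, a caller using the unprotected hashbin_find() might do
 * something like the sketch below ("my_item_t" and "use_item()" are
 * made-up names, not part of this file; the cast works when the user
 * structure embeds an irda_queue_t as its first member) :
 *
 *		unsigned long flags;
 *		my_item_t *item;
 *
 *		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *		item = (my_item_t *) hashbin_find(hashbin, hashv, NULL);
 *		if (item != NULL)
 *			use_item(item);		<- still under the lock
 *		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);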
 *
 * Extra care for enumeration :
 * --------------------------
 *	hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *	As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *	Summary : do the full enumeration while locked.
 *
 *	Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 *	I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 *	Don't believe that because hashbins are now (somewhat) SMP safe
 * that the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 * Jean II
 */
#include <linux/module.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )

/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
	__u32 h = 0;
	__u32 g;

	while(*name) {
		h = (h<<4) + *name++;
		if ((g = (h & 0xf0000000)))
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

/*
 * Function enqueue_first (queue, proc)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
	/*
	 * Check if queue is empty.
	 */
	if ( *queue == NULL ) {
		/*
		 * Queue is empty.  Insert one element into the queue.
		 */
		element->q_next = element->q_prev = *queue = element;

	} else {
		/*
		 * Queue is not empty.  Insert element into front of queue.
		 */
		element->q_next          = (*queue);
		(*queue)->q_prev->q_next = element;
		element->q_prev          = (*queue)->q_prev;
		(*queue)->q_prev         = element;
		(*queue)                 = element;
	}
}


/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
	irda_queue_t *ret;

	pr_debug("dequeue_first()\n");

	/*
	 * Set return value
	 */
	ret =  *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 *  Queue only contained a single element. It will now be
		 *  empty.
		 */
		*queue = NULL;
	} else {
		/*
		 * Queue contained several elements.  Remove the first one.
		 */
		(*queue)->q_prev->q_next = (*queue)->q_next;
		(*queue)->q_next->q_prev = (*queue)->q_prev;
		*queue = (*queue)->q_next;
	}

	/*
	 * Return the removed entry (or NULL if the queue was empty).
	 */
	return ret;
}

/*
 * Function dequeue_general (queue, element)
 *
 *    Remove the given element from the queue, wherever it is.
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
	irda_queue_t *ret;

	pr_debug("dequeue_general()\n");

	/*
	 * Set return value
	 */
	ret =  *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 *  Queue only contained a single element. It will now be
		 *  empty.
		 */
		*queue = NULL;

	} else {
		/*
		 *  Remove specific element.
		 */
		element->q_prev->q_next = element->q_next;
		element->q_next->q_prev = element->q_prev;
		if ( (*queue) == element)
			(*queue) = element->q_next;
	}

	/*
	 * Return the removed entry (or NULL if the queue was empty).
	 */
	return ret;
}

/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
	hashbin_t* hashbin;

	/*
	 * Allocate new hashbin
	 */
	hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
	if (!hashbin)
		return NULL;

	/*
	 * Initialize structure
	 */
	hashbin->hb_type = type;
	hashbin->magic = HB_MAGIC;
	//hashbin->hb_current = NULL;

	/* Make sure all spinlock's are unlocked */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_init(&hashbin->hb_spinlock);
	}

	return hashbin;
}
EXPORT_SYMBOL(hashbin_new);


/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin, the free_func can be a user supplied special routine
 *    for deallocating this structure if it's complex. If not the user can
 *    just supply kfree, which should take care of the job.
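 *
 *    For instance, a user whose entries are plain kmalloc'ed structures
 *    can simply do (a sketch, not a prescription) :
 *		hashbin_delete(hashbin, (FREE_FUNC) kfree);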
 */
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
	irda_queue_t* queue;
	unsigned long flags = 0;
	int i;

	IRDA_ASSERT(hashbin != NULL, return -1;);
	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
					 hashbin_lock_depth++);
	}

	/*
	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
	 *  it has been shown to work
	 */
	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
		queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
		while (queue ) {
			if (free_func)
				(*free_func)(queue);
			queue = dequeue_first(
				(irda_queue_t**) &hashbin->hb_queue[i]);
		}
	}

	/* Cleanup local data */
	hashbin->hb_current = NULL;
	hashbin->magic = ~HB_MAGIC;

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
		hashbin_lock_depth--;
#endif
	}

	/*
	 *  Free the hashbin structure
	 */
	kfree(hashbin);

	return 0;
}
EXPORT_SYMBOL(hashbin_delete);

/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
		    const char* name)
{
	unsigned long flags = 0;
	int bin;

	IRDA_ASSERT( hashbin != NULL, return;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Store name and key
	 */
	entry->q_hash = hashv;
	if ( name )
		strlcpy( entry->q_name, name, sizeof(entry->q_name));

	/*
	 * Insert new entry first
	 */
	enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
		       entry);
	hashbin->hb_size++;

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);

/*
 *  Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
	unsigned long flags = 0;
	irda_queue_t *entry = NULL;

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	entry = hashbin_get_first( hashbin);
	if ( entry != NULL) {
		int	bin;
		long	hashv;
		/*
		 * Locate hashbin
		 */
		hashv = entry->q_hash;
		bin = GET_HASHBIN( hashv );

		/*
		 * Dequeue the entry...
		 */
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 entry);
		hashbin->hb_size--;
		entry->q_next = NULL;
		entry->q_prev = NULL;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}


/*
 *  Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the uniqueness of the index (either hashv or name),
 * leading to removing the WRONG entry.
 * The only simple safe use is :
 *		hashbin_remove(hashbin, (int) self, NULL);
 * In other cases, you must think hard to guarantee uniqueness of the index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
	int bin, found = FALSE;
	unsigned long flags = 0;
	irda_queue_t* entry;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin ];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name) == 0)
					{
						found = TRUE;
						break;
					}
				} else {
					found = TRUE;
					break;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	/*
	 * If entry was found, dequeue it
	 */
	if ( found ) {
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 entry);
		hashbin->hb_size--;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */


	/* Return */
	if ( found )
		return entry;
	else
		return NULL;

}
EXPORT_SYMBOL(hashbin_remove);

/*
 *  Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry from the hashbin
 *
 * In some cases, the user of hashbin can't guarantee the uniqueness
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause trouble,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
	unsigned long flags = 0;
	int	bin;
	long	hashv;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
	IRDA_ASSERT( entry != NULL, return NULL;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Check if valid and not already removed... */
	if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
		entry = NULL;
		goto out;
	}

	/*
	 * Locate hashbin
	 */
	hashv = entry->q_hash;
	bin = GET_HASHBIN( hashv );

	/*
	 * Dequeue the entry...
	 */
	dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
			 entry);
	hashbin->hb_size--;
	entry->q_next = NULL;
	entry->q_prev = NULL;

	/*
	 *  Check if this item is the currently selected item, and in
	 *  that case we must reset hb_current
	 */
	if ( entry == hashbin->hb_current)
		hashbin->hb_current = NULL;
out:
	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);

/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
	int bin;
	irda_queue_t* entry;

	pr_debug("hashbin_find()\n");

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name ) == 0 ) {
						return entry;
					}
				} else {
					return entry;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	return NULL;
}
EXPORT_SYMBOL(hashbin_find);

/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same, but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for entry
	 */
	entry = hashbin_find(hashbin, hashv, name);

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);

/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows concurrent enumerations without the need to
 * lock over the whole session, because the caller keeps the context of
 * the search. On the other hand, it might fail and return NULL if the
 * entry is removed. - Jean II
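 *
 * A possible usage pattern (a sketch only, not taken from any real caller;
 * "last_hashv" is assumed to be the q_hash the caller remembered from the
 * item it last visited) :
 *
 *	void *next;
 *
 *	if (hashbin_find_next(hashbin, last_hashv, NULL, &next) != NULL)
 *		... "next" is the successor of the remembered item ...
 *	else
 *		... the remembered item is gone; restart or give up ...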
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
			 void ** pnext)
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for current entry
	 * This allows us to check if the current item is still in the
	 * hashbin or has been removed.
	 */
	entry = hashbin_find(hashbin, hashv, name);

	/*
	 * Trick hashbin_get_next() to return what we want
	 */
	if(entry) {
		hashbin->hb_current = entry;
		*pnext = hashbin_get_next( hashbin );
	} else
		*pnext = NULL;

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}

/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to first element in hashbin, this function must be
 *    called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
	irda_queue_t *entry;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin == NULL)
		return NULL;

	for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;
			return entry;
		}
	}
	/*
	 *  Did not find any item in hashbin
	 */
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);

/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin.  A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed.
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
	irda_queue_t* entry;
	int bin;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin->hb_current == NULL) {
		IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
		return NULL;
	}
	entry = hashbin->hb_current->q_next;
	bin = GET_HASHBIN( entry->q_hash);

	/*
	 *  Make sure that we are not back at the beginning of the queue
	 *  again
	 */
	if ( entry != hashbin->hb_queue[ bin ]) {
		hashbin->hb_current = entry;

		return entry;
	}

	/*
	 *  Check that this is not the last queue in hashbin
	 */
	if ( bin >= HASHBIN_SIZE)
		return NULL;

	/*
	 *  Move to next queue in hashbin
	 */
	bin++;
	for ( i = bin; i < HASHBIN_SIZE; i++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;

			return entry;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);
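
/*
 * Example (a sketch, not taken from any real caller) : a full enumeration
 * done under the hashbin spinlock, as recommended in the locking notes at
 * the top of this file. "my_item_t" and "handle_item()" are made-up names,
 * and my_item_t is assumed to embed an irda_queue_t as its first member.
 *
 *	unsigned long flags;
 *	my_item_t *item;
 *
 *	spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	for (item = (my_item_t *) hashbin_get_first(hashbin);
 *	     item != NULL;
 *	     item = (my_item_t *) hashbin_get_next(hashbin))
 *		handle_item(item);		<- still under the lock
 *	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 */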