/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Tue Jun  9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Thu Jan  4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function
 *     which I currently don't know which licence or copyright it
 *     has. Please inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/

/*
 * NOTE :
 * There are various problems with this package :
 *	o the hash function for ints is pathetic (but could be changed)
 *	o locking is sometimes suspicious (especially during enumeration)
 *	o most users have only a few elements (== overhead)
 *	o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *	o not 64 bit compliant (most users do hashv = (int) self)
 *	o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list.
 * Jean II
 */

/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *	Hashbins are very often in the IrDA stack a global repository of
 * information, and therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *	Therefore, very often it is highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee the
 * consistency of the operations on it.
 *
 *	First, we need to define the objective of locking :
 *		1) Protect user data (content pointed to by the hashbin)
 *		2) Protect hashbin structure itself (linked list in each bin)
 *
 *			   OLD LOCKING
 *			   -----------
 *
 *	The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* aspects.
 *		o HB_GLOBAL was using a spinlock for each bin (local locking).
 *		o HB_LOCAL was disabling irq on *all* CPUs, so it used a
 *		  single global semaphore.
 *	The problems were :
 *		A) Global irq disabling is no longer supported by the kernel
 *		B) No protection for the hashbin struct global data
 *			o hashbin_delete()
 *			o hb_current
 *		C) No protection for user data in some cases
 *
 *	A) HB_LOCAL uses global irq disabling, so it doesn't work on
 * kernel 2.5.X. Even when it is supported (kernel 2.4.X and earlier),
 * its performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *	B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so it can't do (B).
 *	C) Some functions return a pointer to data that is still in the
 * hashbin :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 * As the data is still in the hashbin, it may be changed or freed
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must cover the caller's use of the
 * data as well.
 * The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so it doesn't know which spinlock to use.
 *
 *	Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *			   NEW LOCKING
 *			   -----------
 *
 *	To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *		1) New HB_LOCK scheme
 *		2) hashbin->hb_spinlock
 *		3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 *	HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *	HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves other calls unprotected.
 *	HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 *	The spinlock that HB_LOCK uses is available for the caller, so that
 * the caller can protect unprotected calls (see below).
 *	If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 *	Locking is done like this :
 *		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	Releasing the lock :
 *		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 *	The following calls are safe or protected via HB_LOCK :
 *		o hashbin_new()		-> safe
 *		o hashbin_delete()
 *		o hashbin_insert()
 *		o hashbin_remove_first()
 *		o hashbin_remove()
 *		o hashbin_remove_this()
 *		o HASHBIN_GET_SIZE()	-> atomic
 *
 *	The following calls only protect the hashbin itself :
 *		o hashbin_lock_find()
 *		o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 *	The following calls need to be protected by the caller :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 *	If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 *	If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *	In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region (see the enumeration sketch after this comment).
 *
 * Extra care for enumeration :
 * --------------------------
 *	hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *	As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *	Summary : do the full enumeration while locked.
 *
 *	Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 *	I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 *	Don't believe that because hashbins are now (somewhat) SMP safe
 * the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 *	Jean II
 */
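/*
 * Usage sketch (illustration only) : how a caller of an HB_LOCK hashbin
 * would do a full enumeration with the unprotected calls, following the
 * policy above. "struct my_obj" is a hypothetical user structure assumed
 * to embed an irda_queue_t as its first member, which is how entries are
 * normally stored in a hashbin.
 *
 *	unsigned long flags;
 *	struct my_obj *obj;
 *
 *	spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	for (obj = (struct my_obj *) hashbin_get_first(hashbin);
 *	     obj != NULL;
 *	     obj = (struct my_obj *) hashbin_get_next(hashbin)) {
 *		... only examine obj here, while the hashbin is locked ...
 *	}
 *	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Do not call the protected calls (hashbin_insert(), hashbin_remove*(),
 * ...) from inside this region : with HB_LOCK they take hb_spinlock
 * themselves.
 */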
#include <linux/module.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )

/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
	__u32 h = 0;
	__u32 g;

	while (*name) {
		h = (h<<4) + *name++;
		g = h & 0xf0000000;
		if (g)
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

/*
 * Function enqueue_first (queue, proc)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
	/*
	 * Check if queue is empty.
	 */
	if ( *queue == NULL ) {
		/*
		 * Queue is empty.  Insert one element into the queue.
		 */
		element->q_next = element->q_prev = *queue = element;

	} else {
		/*
		 * Queue is not empty.  Insert element into front of queue.
		 */
		element->q_next          = (*queue);
		(*queue)->q_prev->q_next = element;
		element->q_prev          = (*queue)->q_prev;
		(*queue)->q_prev         = element;
		(*queue)                 = element;
	}
}


/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
	irda_queue_t *ret;

	pr_debug("dequeue_first()\n");

	/*
	 * Set return value
	 */
	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue only contained a single element. It will now be
		 * empty.
		 */
		*queue = NULL;
	} else {
		/*
		 * Queue contained several elements.  Remove the first one.
		 */
		(*queue)->q_prev->q_next = (*queue)->q_next;
		(*queue)->q_next->q_prev = (*queue)->q_prev;
		*queue = (*queue)->q_next;
	}

	/*
	 * Return the removed entry (or NULL if the queue was empty).
	 */
	return ret;
}
/*
 * Function dequeue_general (queue, element)
 *
 *    Remove a specific element from the queue.
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
	irda_queue_t *ret;

	pr_debug("dequeue_general()\n");

	/*
	 * Set return value
	 */
	ret = *queue;

	if ( *queue == NULL ) {
		/*
		 * Queue was empty.
		 */
	} else if ( (*queue)->q_next == *queue ) {
		/*
		 * Queue only contained a single element. It will now be
		 * empty.
		 */
		*queue = NULL;

	} else {
		/*
		 * Remove specific element.
		 */
		element->q_prev->q_next = element->q_next;
		element->q_next->q_prev = element->q_prev;
		if ( (*queue) == element)
			(*queue) = element->q_next;
	}

	/*
	 * Return the removed entry (or NULL if the queue was empty).
	 */
	return ret;
}

/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
	hashbin_t* hashbin;

	/*
	 * Allocate new hashbin
	 */
	hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
	if (!hashbin)
		return NULL;

	/*
	 * Initialize structure
	 */
	hashbin->hb_type = type;
	hashbin->magic = HB_MAGIC;
	//hashbin->hb_current = NULL;

	/* Make sure all spinlock's are unlocked */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_init(&hashbin->hb_spinlock);
	}

	return hashbin;
}
EXPORT_SYMBOL(hashbin_new);


/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin, the free_func can be a user supplied special routine
 *    for deallocating this structure if it's complex. If not the user can
 *    just supply kfree, which should take care of the job.
 */
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
	irda_queue_t* queue;
	unsigned long flags = 0;
	int i;

	IRDA_ASSERT(hashbin != NULL, return -1;);
	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

	/* Synchronize */
	if (hashbin->hb_type & HB_LOCK)
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Free the entries in the hashbin, TODO: use hashbin_clear when
	 * it has been shown to work
	 */
	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
		while (1) {
			queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);

			if (!queue)
				break;

			if (free_func) {
				if (hashbin->hb_type & HB_LOCK)
					spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
				free_func(queue);
				if (hashbin->hb_type & HB_LOCK)
					spin_lock_irqsave(&hashbin->hb_spinlock, flags);
			}
		}
	}

	/* Cleanup local data */
	hashbin->hb_current = NULL;
	hashbin->magic = ~HB_MAGIC;

	/* Release lock */
	if (hashbin->hb_type & HB_LOCK)
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	/*
	 *  Free the hashbin structure
	 */
	kfree(hashbin);

	return 0;
}
EXPORT_SYMBOL(hashbin_delete);
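/*
 * Usage sketch (illustration only) : typical lifecycle of a shared
 * hashbin. "my_hashbin" is a hypothetical name. HB_LOCK is requested at
 * creation time so the protected calls serialize themselves on
 * hb_spinlock; at teardown, hashbin_delete() hands every remaining entry
 * to the supplied free_func (plain kfree is enough for entries that need
 * no special cleanup, as noted above).
 *
 *	hashbin_t *my_hashbin;
 *
 *	my_hashbin = hashbin_new(HB_LOCK);
 *	if (my_hashbin == NULL)
 *		return -ENOMEM;
 *	...
 *	hashbin_delete(my_hashbin, (FREE_FUNC) kfree);
 */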
/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
		    const char* name)
{
	unsigned long flags = 0;
	int bin;

	IRDA_ASSERT( hashbin != NULL, return;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Store name and key
	 */
	entry->q_hash = hashv;
	if ( name )
		strlcpy( entry->q_name, name, sizeof(entry->q_name));

	/*
	 * Insert new entry first
	 */
	enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
		       entry);
	hashbin->hb_size++;

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);
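/*
 * Usage sketch (illustration only) : inserting a caller-owned object.
 * "struct my_obj" is hypothetical; what matters is that it starts with an
 * irda_queue_t, since hashbin_insert() stores the key and links the entry
 * through that header. Entries can be keyed either by an arbitrary long
 * (name == NULL) or by a name string.
 *
 *	struct my_obj {
 *		irda_queue_t q;		must be the first member
 *		int foo;
 *	};
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	obj->foo = 42;
 *	hashbin_insert(my_hashbin, (irda_queue_t *) obj, (long) obj, NULL);
 *
 * Keying by the object's own address, as above, is the simple safe index
 * mentioned in the hashbin_remove() comment below.
 */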
/*
 *  Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
	unsigned long flags = 0;
	irda_queue_t *entry = NULL;

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	entry = hashbin_get_first( hashbin);
	if ( entry != NULL) {
		int	bin;
		long	hashv;
		/*
		 * Locate hashbin
		 */
		hashv = entry->q_hash;
		bin = GET_HASHBIN( hashv );

		/*
		 * Dequeue the entry...
		 */
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 entry);
		hashbin->hb_size--;
		entry->q_next = NULL;
		entry->q_prev = NULL;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}


/*
 *  Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the uniqueness of the index (either hashv or
 * name), leading to removing the WRONG entry.
 * The only simple safe use is :
 *		hashbin_remove(hashbin, (long) self, NULL);
 * In other cases, you must think hard to guarantee uniqueness of the
 * index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
	int bin, found = FALSE;
	unsigned long flags = 0;
	irda_queue_t* entry;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin ];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name) == 0)
					{
						found = TRUE;
						break;
					}
				} else {
					found = TRUE;
					break;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	/*
	 * If entry was found, dequeue it
	 */
	if ( found ) {
		dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
				 entry);
		hashbin->hb_size--;

		/*
		 *  Check if this item is the currently selected item, and in
		 *  that case we must reset hb_current
		 */
		if ( entry == hashbin->hb_current)
			hashbin->hb_current = NULL;
	}

	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Return */
	if ( found )
		return entry;
	else
		return NULL;
}
EXPORT_SYMBOL(hashbin_remove);
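/*
 * Usage sketch (illustration only) : hashbin_remove_first() is one of the
 * protected calls, so draining a shared hashbin entry by entry needs no
 * explicit locking by the caller. "my_obj_free" is a hypothetical cleanup
 * routine taking a void pointer.
 *
 *	struct my_obj *obj;
 *
 *	while ((obj = hashbin_remove_first(my_hashbin)) != NULL)
 *		my_obj_free(obj);
 *
 * For a one-shot teardown, hashbin_delete(my_hashbin, my_obj_free) does
 * the same walk and also frees the hashbin structure itself.
 */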
/*
 *  Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry
 *
 * In some cases, the user of hashbin can't guarantee the uniqueness
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause trouble,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
	unsigned long flags = 0;
	int	bin;
	long	hashv;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
	IRDA_ASSERT( entry != NULL, return NULL;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Check if valid and not already removed... */
	if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
		entry = NULL;
		goto out;
	}

	/*
	 * Locate hashbin
	 */
	hashv = entry->q_hash;
	bin = GET_HASHBIN( hashv );

	/*
	 * Dequeue the entry...
	 */
	dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
			 entry);
	hashbin->hb_size--;
	entry->q_next = NULL;
	entry->q_prev = NULL;

	/*
	 *  Check if this item is the currently selected item, and in
	 *  that case we must reset hb_current
	 */
	if ( entry == hashbin->hb_current)
		hashbin->hb_current = NULL;
out:
	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
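/*
 * Usage sketch (illustration only) : when the caller already holds a
 * pointer to the entry (from its own state, not from an unlocked
 * enumeration), hashbin_remove_this() removes exactly that entry and
 * sidesteps the uniqueness problem described above. "struct my_obj" and
 * "my_hashbin" are the hypothetical names used in the earlier sketches.
 *
 *	struct my_obj *obj = ...;	owned and tracked by the caller
 *
 *	if (hashbin_remove_this(my_hashbin, (irda_queue_t *) obj) != NULL)
 *		my_obj_free(obj);	it was still in the hashbin
 *
 * A NULL return covers the case where the entry had already been removed
 * (its q_next/q_prev were cleared), so a double removal is benign.
 */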
/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
	int bin;
	irda_queue_t* entry;

	pr_debug("hashbin_find()\n");

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/*
	 * Locate hashbin
	 */
	if ( name )
		hashv = hash( name );
	bin = GET_HASHBIN( hashv );

	/*
	 * Search for entry
	 */
	entry = hashbin->hb_queue[ bin];
	if ( entry ) {
		do {
			/*
			 * Check for key
			 */
			if ( entry->q_hash == hashv ) {
				/*
				 * Name compare too?
				 */
				if ( name ) {
					if ( strcmp( entry->q_name, name ) == 0 ) {
						return entry;
					}
				} else {
					return entry;
				}
			}
			entry = entry->q_next;
		} while ( entry != hashbin->hb_queue[ bin ] );
	}

	return NULL;
}
EXPORT_SYMBOL(hashbin_find);

/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same as hashbin_find(), but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for entry
	 */
	entry = hashbin_find(hashbin, hashv, name);

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);

/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows doing concurrent enumerations without the
 * need to lock over the whole session, because the caller keeps the
 * context of the search. On the other hand, it might fail and return
 * NULL if the entry is removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
			 void ** pnext)
{
	unsigned long flags = 0;
	irda_queue_t* entry;

	/* Synchronize */
	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 * Search for current entry
	 * This allows us to check if the current item is still in the
	 * hashbin or has been removed.
	 */
	entry = hashbin_find(hashbin, hashv, name);

	/*
	 * Trick hashbin_get_next() to return what we want
	 */
	if(entry) {
		hashbin->hb_current = entry;
		*pnext = hashbin_get_next( hashbin );
	} else
		*pnext = NULL;

	/* Release lock */
	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	return entry;
}

/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to first element in hashbin, this function must be
 *    called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
	irda_queue_t *entry;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin == NULL)
		return NULL;

	for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;
			return entry;
		}
	}
	/*
	 * Did not find any item in hashbin
	 */
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);

/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
	irda_queue_t* entry;
	int bin;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	if ( hashbin->hb_current == NULL) {
		IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
		return NULL;
	}
	entry = hashbin->hb_current->q_next;
	bin = GET_HASHBIN( entry->q_hash);

	/*
	 *  Make sure that we are not back at the beginning of the queue
	 *  again
	 */
	if ( entry != hashbin->hb_queue[ bin ]) {
		hashbin->hb_current = entry;

		return entry;
	}

	/*
	 *  Check that this is not the last queue in hashbin
	 */
	if ( bin >= HASHBIN_SIZE)
		return NULL;

	/*
	 *  Move to next queue in hashbin
	 */
	bin++;
	for ( i = bin; i < HASHBIN_SIZE; i++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;

			return entry;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);
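/*
 * Usage sketch (illustration only) : resumable enumeration with
 * hashbin_find_next(), the slower alternative mentioned in the locking
 * notes at the top of this file. Each step re-locates the current entry
 * by its key before fetching the successor, so the hashbin can be left
 * unlocked between steps; a NULL return means the current entry has been
 * removed and the walk must be restarted. "struct my_obj", keyed by its
 * own address, is the hypothetical layout used in the earlier sketches.
 *
 *	unsigned long flags;
 *	struct my_obj *obj;
 *	void *next;
 *
 *	spin_lock_irqsave(&my_hashbin->hb_spinlock, flags);
 *	obj = (struct my_obj *) hashbin_get_first(my_hashbin);
 *	spin_unlock_irqrestore(&my_hashbin->hb_spinlock, flags);
 *
 *	while (obj != NULL) {
 *		... work on obj with the hashbin unlocked ...
 *		if (hashbin_find_next(my_hashbin, (long) obj, NULL,
 *				      &next) == NULL)
 *			break;
 *		obj = next;
 *	}
 *
 * Working on obj while the hashbin is unlocked is only safe if the caller
 * protects the object's lifetime by some other means (reference counts,
 * its own locks, ...), as stressed in the notes above.
 */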