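/*
 * Small id to pointer translation service.
 *
 * An idr maps small integer ids to pointers using a radix-tree-like
 * structure of idr_layer nodes; a per-layer bitmap makes finding a free
 * id fast.  Callers allocate an id for a (void *), hand the id out, and
 * later use it to look the pointer back up.  The ida variant at the end
 * of this file allocates bare ids without an associated pointer.
 */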
#ifndef TEST	/* kernel-only headers, skipped when built for user-space testing */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}

/* pop an idr_layer off @idp's per-idr free list, if any */
static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

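/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate layers from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * one from @layer_idr->id_free, which the caller must have stocked
 * beforehand (e.g. via __idr_pre_get()).
 */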
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again without __GFP_NOWARN so that
	 * an allocation-failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
143
144static void idr_layer_rcu_free(struct rcu_head *head)
145{
146 struct idr_layer *layer;
147
148 layer = container_of(head, struct idr_layer, rcu_head);
149 kmem_cache_free(idr_layer_cache, layer);
150}
151
152static inline void free_layer(struct idr *idr, struct idr_layer *p)
153{
154 if (idr->hint && idr->hint == p)
155 RCU_INIT_POINTER(idr->hint, NULL);
156 call_rcu(&p->rcu_head, idr_layer_rcu_free);
157}
158
159
160static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
161{
162 p->ary[0] = idp->id_free;
163 idp->id_free = p;
164 idp->id_free_cnt++;
165}
166
167static void move_to_free_list(struct idr *idp, struct idr_layer *p)
168{
169 unsigned long flags;
170
171
172
173
174 spin_lock_irqsave(&idp->lock, flags);
175 __move_to_free_list(idp, p);
176 spin_unlock_irqrestore(&idp->lock, flags);
177}
178
179static void idr_mark_full(struct idr_layer **pa, int id)
180{
181 struct idr_layer *p = pa[0];
182 int l = 0;
183
184 __set_bit(id & IDR_MASK, p->bitmap);
185
186
187
188
189
190
191 while (bitmap_full(p->bitmap, IDR_SIZE)) {
192 if (!(p = pa[++l]))
193 break;
194 id = id >> IDR_BITS;
195 __set_bit((id & IDR_MASK), p->bitmap);
196 }
197}
198
199int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
200{
201 while (idp->id_free_cnt < MAX_IDR_FREE) {
202 struct idr_layer *new;
203 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
204 if (new == NULL)
205 return (0);
206 move_to_free_list(idp, new);
207 }
208 return 1;
209}
210EXPORT_SYMBOL(__idr_pre_get);
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
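/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */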
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf layer.
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

/*
 * Allocate an empty slot at or above @starting_id, growing the tree as
 * needed.  On success the allocated id is returned and @pa[0] points at
 * the leaf idr_layer holding the slot; on failure -ENOMEM or -ENOSPC is
 * returned.
 */
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

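/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */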
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(idp, ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);

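/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * A typical call pattern (the lock and error handling are illustrative)
 * lets the allocation under the spinlock fall back to the buffer that was
 * preloaded with GFP_KERNEL:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */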
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * The loop re-enables preemption around each allocation, so the
	 * task may migrate and the buffer we end up topping off may not be
	 * the one we started with.  That is acceptable: idr_alloc() copes
	 * with an empty buffer by falling back to kmem_cache, and its
	 * return value has to be checked anyway.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to the per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

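/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no id is
 * available in the range, returns -ENOSPC.  On memory allocation failure,
 * returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in an RCU-safe way after removal from the idr.
 */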
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);

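/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc(), but prefers to allocate
 * progressively higher ids if it can.  If the "cur" counter wraps, it
 * starts again at the @start value.
 */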
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

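/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */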
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

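/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */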
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/*
 * Slow path of idr_find(): walk the tree from the top when the cached
 * hint layer doesn't cover @id.
 */
void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

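/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_remove() are not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_remove().
 */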
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

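/**
 * idr_get_next - lookup next object of id to given id.
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns pointer to registered object with id, which is next number to
 * given id.  After being looked up, *@nextidp will be updated for the next
 * iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */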
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);

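/**
 * idr_replace - replace pointer for specified id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */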
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	/* use idr_max() rather than 1 << n, which can overflow when n == 32 */
	if (id > idr_max(p->layer + 1))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

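/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */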
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

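/*
 * IDA - IDR based id allocator.  Use this when you only need ids and no
 * id -> pointer translation: each allocated id occupies just one bit in a
 * custom leaf node (struct ida_bitmap with IDA_BITMAP_BITS slots), so the
 * memory footprint is much smaller than a full idr.
 */

/*
 * free_bitmap() stashes a spare ida_bitmap in @ida->free_bitmap for the
 * next allocation, or frees @bitmap if a spare is already cached.
 */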
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

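/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */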
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

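/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks held.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */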
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

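/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */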
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	/* @id may never have been allocated; the leaf bitmap can be missing */
	bitmap = (void *)p->ary[n];
	if (!bitmap || !test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

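/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x8000000)
 * @end: the maximum id (exclusive, < 0x8000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 *
 * A typical caller (names here are illustrative) just brackets the
 * lifetime of the numbered object with the two calls:
 *
 *	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_simple_remove(&my_ida, nr);
 */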
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

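/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */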
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);