/*
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed by the
 * id to obtain the pointer.  The bitmap makes allocating a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever.  You can pass this id to a user for him to pass
 * back at a later time.  You then pass that id to this code and it
 * returns your pointer.
 */

#ifndef TEST			/* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT	(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with the per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before the preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that the memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id > idr_max(idp->layers)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload the per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks; see the
 * illustrative sketch after this function for the intended pairing.
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming the preload buffer from non-process context breaks the
	 * preload allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o a full idr_layer buffer and
	 * the return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask, which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to the per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);
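
/*
 * Illustrative sketch, not part of the original file: the intended
 * idr_preload() / idr_alloc() pairing under a spinlock.  If the
 * GFP_NOWAIT allocation below fails, it may be treated as if it had
 * been attempted with GFP_KERNEL.  my_lock, my_idr, ptr and the err
 * label are hypothetical names.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		goto err;
 */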

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in an RCU-safe way after removal from the idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
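
/*
 * Minimal usage sketch (added for illustration; my_idr and my_obj are
 * hypothetical and the caller must provide its own serialization
 * against other modifiers).  idr_alloc() returns -ENOMEM or -ENOSPC
 * on failure; idr_find() lookups may run under rcu_read_lock():
 *
 *	struct idr my_idr;
 *	int id;
 *
 *	idr_init(&my_idr);
 *	id = idr_alloc(&my_idr, my_obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *
 *	my_obj = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 */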

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can.  If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
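
/*
 * Illustration (hypothetical names): cyclic allocation suits ids that
 * should not be reused immediately, e.g. transaction or session ids.
 * If the previous allocation returned N, the call below tries N + 1
 * first and only wraps back to @start once the upper range is full:
 *
 *	id = idr_alloc_cyclic(&my_idr, my_obj, 0, 0, GFP_KERNEL);
 */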

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		RCU_INIT_POINTER(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	if (id > idr_max(idp->layers)) {
		idr_remove_warning(id);
		return;
	}

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
}
EXPORT_SYMBOL(idr_remove);

static void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = idp->top;
	RCU_INIT_POINTER(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			p = p->ary[(id >> n) & IDR_MASK];
			*++paa = p;
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (*paa)
				free_layer(idp, *paa);
			n += IDR_BITS;
			--paa;
		}
	}
	idp->layers = 0;
}

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers; see the sketch after
 * this function.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);
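
/*
 * Typical teardown sketch (illustrative; free_fn and my_idr are
 * hypothetical): release the objects with idr_for_each(), then the id
 * mappings and cached layers with idr_destroy():
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_destroy(&my_idr);
 */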

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_alloc() and idr_remove() are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_alloc() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	*paa = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
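
/*
 * Example callback (illustrative; struct my_obj and my_idr are
 * hypothetical).  A non-zero return stops the iteration and is
 * propagated back to the idr_for_each() caller:
 *
 *	static int check_busy(int id, void *p, void *data)
 *	{
 *		struct my_obj *obj = p;
 *
 *		return obj->busy ? -EBUSY : 0;
 *	}
 *
 *	err = idr_for_each(&my_idr, check_busy, NULL);
 */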

/**
 * idr_get_next - lookup next object of id to given id.
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns pointer to registered object with id, which is next number to
 * given id.  After being looked up, *@nextidp will be updated for the next
 * iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = *paa = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		p = *paa;
		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
			*++paa = p;
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
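
/*
 * Manual iteration sketch (illustrative; my_idr and use() are
 * hypothetical).  idr_get_next() is the building block behind the
 * idr_for_each_entry() helper in <linux/idr.h>:
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(p);
 *		id++;
 *	}
 */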

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-ENOENT);

	if (id > idr_max(p->layer + 1))
		return ERR_PTR(-ENOENT);

	n = p->layer * IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

static int idr_has_entry(int id, void *p, void *data)
{
	return 1;
}

bool idr_is_empty(struct idr *idp)
{
	return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 */
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks; see the retry-loop sketch after this
 * function.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea behind ida is to have a small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
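
/*
 * Canonical retry loop for the ida_pre_get() / ida_get_new_above()
 * pair (sketch; my_ida and my_lock are hypothetical).  -EAGAIN means
 * the preallocated resources were consumed, so preallocate and retry:
 *
 *	again:
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *
 *		if (ret == -EAGAIN)
 *			goto again;
 */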

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	if (idr_id > idr_max(ida->idr.layers))
		goto err;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!bitmap || !test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
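
/*
 * Illustrative pairing (hypothetical minor-number example; my_ida and
 * MAX_MINORS are not from this file).  ida_simple_get() handles the
 * locking and the -EAGAIN retry internally:
 *
 *	int minor = ida_simple_get(&my_ida, 0, MAX_MINORS, GFP_KERNEL);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	ida_simple_remove(&my_ida, minor);
 */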

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);