// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this includes on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers.  All operations are
 * lockless with the exception of the linked list of chunks, which is
 * only modified under the pool spinlock and never from atomic context.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool a lock has to be
 * still taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code using the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set the bits in @mask_to_set at @addr.  Fails with -EBUSY
 * if any of those bits is already set, i.e. the region was grabbed by a
 * concurrent allocator.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear at @addr.  Fails with
 * -EBUSY if any of those bits is already clear, which indicates a
 * double free or a free of an unallocated region.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits start from @start in @map lock-lessly. Semantics of this
 * function is the same as bitmap_set() but this version is lock-less.
 * Returns 0 on success, or the number of bits still to be set on failure.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits start from @start in @map lock-lessly. Semantics of this
 * function is the same as bitmap_clear() but this version is lock-less.
 * Returns 0 on success, or the number of bits still to be cleared on failure.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
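
/*
 * Example (illustrative sketch only, not part of the API): a typical pool
 * lifecycle using the non-owner wrappers declared in <linux/genalloc.h>.
 * The buffer, node id and sizes here are hypothetical.
 *
 *	struct gen_pool *pool = gen_pool_create(ilog2(64), -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add(pool, (unsigned long)vaddr, SZ_64K, -1))
 *		goto destroy;
 *	addr = gen_pool_alloc(pool, 256);
 *	...
 *	gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);
 */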

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/*
			 * Raced with a concurrent allocator: undo the
			 * partial claim and search again.
			 */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
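
/*
 * Example (illustrative only): overriding the pool's default algorithm for
 * a single allocation via gen_pool_alloc_algo(), the non-owner wrapper from
 * <linux/genalloc.h> that ends up in the function above.
 *
 *	addr = gen_pool_alloc_algo(pool, 512, gen_pool_best_fit, NULL);
 */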

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
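
/*
 * Example (illustrative only): carving a DMA-able buffer out of a pool
 * whose chunks were added with valid physical addresses; the device-view
 * address comes from gen_pool_virt_to_phys() internally.
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 1024, &dma);
 *	if (!va)
 *		return -ENOMEM;
 *	// va is the CPU view, dma the device view of the same 1024 bytes
 */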

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.  Uses the
 * given pool allocation function.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the given pool allocation function.  Can not be used in NMI handler
 * on architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool
 * for DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	/* The address did not belong to any chunk of this pool. */
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool:	the generic memory pool
 * @start:	start address
 * @size:	size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
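
/*
 * Example (illustrative only): asking for a 256-byte-aligned region by
 * pairing this algorithm with struct genpool_data_align.
 *
 *	struct genpool_data_align align = { .align = 256 };
 *	addr = gen_pool_alloc_algo(pool, 512, gen_pool_first_fit_align,
 *				   &align);
 */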

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed offset
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
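
/*
 * Example (illustrative only): reserving a region at a fixed offset from
 * the start of a chunk.  The offset must be a multiple of the pool's
 * minimum allocation size, or the WARN_ON above fires and the request
 * fails.
 *
 *	struct genpool_data_fixed fixed = { .offset = 0x1000 };
 *	addr = gen_pool_alloc_algo(pool, 512, gen_pool_fixed_alloc, &fixed);
 */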

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that the genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
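
/*
 * Example (illustrative only): a driver-managed pool that is torn down
 * automatically on driver detach; "rx-buffers" is a hypothetical name.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(32), NUMA_NO_NODE,
 *				    "rx-buffers");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */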

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing gen_pool phandle
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
				 const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
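
/*
 * Example (illustrative only): device tree usage.  A client node points at
 * a pool provider through a hypothetical "sram" phandle property:
 *
 *	sram: sram@20000 { ... };
 *	client { sram = <&sram>; };
 *
 * and the client driver resolves it with:
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 */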