linux/lib/genalloc.c
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation works only if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
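
/*
 * Illustrative usage (a hypothetical driver-side sketch, not part of
 * this file): create a pool with 32-byte granularity, seed it with one
 * chunk up front so later allocations can stay lockless, then allocate
 * and free from it.  gen_pool_add() is the wrapper around
 * gen_pool_add_virt() declared in <linux/genalloc.h>; vaddr is assumed
 * to be memory the caller already owns.
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add(pool, (unsigned long)vaddr, 4096, -1))
 *		return -ENOMEM;
 *	addr = gen_pool_alloc(pool, 256);
 *	...
 *	gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);
 */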

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/* Atomically set @mask_to_set in @addr; fail if any of the bits is already set. */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/* Atomically clear @mask_to_clear in @addr; fail if any of the bits is already clear. */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to set the same bit, the loser returns the number of bits
 * still to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
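
/*
 * Worked example of the mask arithmetic above (assuming 64-bit longs):
 * for start = 62 and nr = 4, BITMAP_FIRST_WORD_MASK(62) covers bits
 * 62-63 and bits_to_set = 2, so the loop sets the top two bits of the
 * first word; the remaining nr = 2 bits are then set in the next word
 * via BITMAP_LAST_WORD_MASK(66), which evaluates to 0x3 (bits 0-1).
 */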

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race to clear the same bit, the loser returns the number of bits
 * still to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
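
/*
 * Illustrative use (a sketch; sram_virt and sram_phys are assumed to
 * come from the caller, e.g. an ioremap() of on-chip SRAM): keeping
 * the virtual and physical bases paired here is what lets
 * gen_pool_virt_to_phys() work later.
 *
 *	rc = gen_pool_add_virt(pool, (unsigned long)sram_virt,
 *			       sram_phys, SZ_64K, -1);
 */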

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool,
 * using the given allocation function.
 * Can not be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
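
/*
 * Illustrative use (sketch): allocate a buffer and retrieve its
 * DMA-view address in one call.  The pool is assumed to have been
 * populated with gen_pool_add_virt() so the phys translation is valid.
 *
 *	dma_addr_t dma;
 *	void *buf = gen_pool_dma_alloc(pool, 512, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */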

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool.  @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
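
/*
 * Illustrative callback (sketch): count the chunks in a pool.  The
 * callback must not sleep, since it runs under rcu_read_lock().
 *
 *	static void count_chunk(struct gen_pool *pool,
 *				struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int nchunks = 0;
 *
 *	gen_pool_for_each_chunk(pool, count_chunk, &nchunks);
 */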

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:       the generic memory pool
 * @start:      start address
 * @size:       size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
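
/*
 * Illustrative use (sketch): switch a pool to best-fit allocation.
 * Passing NULL for @algo would restore the first-fit default.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */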

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
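
/*
 * Illustrative use (sketch): request a 256-byte-aligned allocation by
 * passing this algorithm explicitly, bypassing the pool default.  The
 * alignment applies to the offset within the chunk, so the chunk base
 * itself is assumed to be suitably aligned.
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, 192,
 *				gen_pool_first_fit_align, &align_data);
 */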

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
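
/*
 * Illustrative use (sketch): carve out the block starting exactly
 * 0x100 bytes into the chunk.  The offset must be a multiple of the
 * pool's minimum allocation size, or the request is rejected by the
 * WARN_ON above.
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, 64,
 *				gen_pool_fixed_alloc, &fixed_data);
 */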

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 *
 * Iterate over the bitmap to find the smallest free region
 * that can satisfy the allocation.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
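
/*
 * Worked example (sketch): with free runs of 8 and 3 bits and nr = 3,
 * first-fit would take a slice of the 8-bit run, while the scan above
 * keeps looking and returns the 3-bit run, an exact fit that ends the
 * search early.
 */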

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
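
/*
 * Illustrative use in a probe routine (sketch; the "sram" pool name is
 * an assumed convention): the pool is torn down automatically when the
 * device goes away, so no explicit gen_pool_destroy() is needed.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *				    NUMA_NO_NODE, "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */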

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
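
/*
 * Illustrative use (sketch; the "sram" phandle property is an assumed
 * binding): a consumer node such as
 *
 *	uart0: serial@f8000000 {
 *		...
 *		sram = <&mysram>;
 *	};
 *
 * can look up its pool with:
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 */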
#endif /* CONFIG_OF */