linux/lib/bucket_locks.c
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
 * control how many elements are allocated: max_size gives the maximum
 * number of locks to allocate and cpu_mult gives the number of locks to
 * allocate per possible CPU. size - 1 is returned in *locks_mask, so the
 * resulting size should be a power of 2 for the mask to index the whole
 * array, as in a hash table.
 */

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
                           size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
        spinlock_t *tlocks = NULL;
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        /* Keep the lock count small under lockdep to limit tracking overhead. */
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        if (cpu_mult) {
                nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
                size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
        } else {
                size = max_size;
        }

        if (sizeof(spinlock_t) != 0) {
                /*
                 * spinlock_t can be zero-sized (e.g. on UP builds without
                 * lock debugging), in which case no lock array is needed.
                 * kvmalloc() may fall back to vmalloc(), which can sleep,
                 * so only use it when the gfp flags allow blocking.
                 */
                if (gfpflags_allow_blocking(gfp))
                        tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
                else
                        tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
                if (!tlocks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tlocks[i]);
        }

        *locks = tlocks;
        *locks_mask = size - 1;

        return 0;
}
EXPORT_SYMBOL(alloc_bucket_spinlocks);

void free_bucket_spinlocks(spinlock_t *locks)
{
        kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
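
Usage sketch (not part of the upstream file): a minimal caller built around a hypothetical example_table structure, showing how the mask returned in *locks_mask is ANDed with a bucket hash to pick one of the allocated locks. The example_table_* names and the 128/4 sizing are invented for illustration, and jhash_1word() stands in for whatever hash the real caller already uses on its keys.

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/spinlock.h>

struct example_table {                  /* hypothetical caller state */
        spinlock_t *bucket_locks;       /* array from alloc_bucket_spinlocks() */
        unsigned int lock_mask;         /* size - 1, used to select a lock */
};

static int example_table_init(struct example_table *tbl)
{
        /* At most 128 locks in total, up to 4 per possible CPU; may sleep. */
        return alloc_bucket_spinlocks(&tbl->bucket_locks, &tbl->lock_mask,
                                      128, 4, GFP_KERNEL);
}

static void example_table_insert(struct example_table *tbl, u32 key)
{
        /* Hash the key and mask it to choose the lock for this bucket. */
        spinlock_t *lock = &tbl->bucket_locks[jhash_1word(key, 0) &
                                              tbl->lock_mask];

        spin_lock(lock);
        /* ... insert the entry into the bucket chosen by the same hash ... */
        spin_unlock(lock);
}

static void example_table_destroy(struct example_table *tbl)
{
        free_bucket_spinlocks(tbl->bucket_locks);
}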