linux/include/linux/blockgroup_lock.h
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * NR_BG_LOCKS must be a power of two so that bgl_lock_ptr() can reduce
 * a block group number to a lock index with a simple mask.  Scale the
 * lock count with NR_CPUS to reduce contention on larger machines.
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS     128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS     64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS     32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS     16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS     8
#else
#define NR_BG_LOCKS     4
#endif

#else   /* CONFIG_SMP */
#define NR_BG_LOCKS     1
#endif  /* CONFIG_SMP */
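
/*
 * For example, a kernel built with NR_CPUS = 8 gets NR_BG_LOCKS = 32,
 * so block groups 0, 32, 64, ... all hash to locks[0] in bgl_lock_ptr()
 * below, while groups in different residue classes modulo 32 use
 * independent locks.
 */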

/*
 * Each bgl_lock gets its own cacheline on SMP so that CPUs spinning on
 * different locks do not bounce the same cacheline (false sharing).
 */
struct bgl_lock {
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
        struct bgl_lock locks[NR_BG_LOCKS];
};

/* Initialize every hashed lock; call once before the locks are used. */
static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
        int i;

        for (i = 0; i < NR_BG_LOCKS; i++)
                spin_lock_init(&bgl->locks[i].lock);
}

/*
 * The accessor is an inline function so we can embed a blockgroup_lock
 * into different superblock types.
 */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
        return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}

#endif  /* _LINUX_BLOCKGROUP_LOCK_H */
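
For illustration, here is a minimal sketch of how a filesystem could embed and use this header. The struct my_sb_info, the sb_bgl_lock() helper, my_fill_super() and my_update_group() are hypothetical stand-ins (ext2/ext3 use their own superblock-info types); only struct blockgroup_lock, bgl_lock_init() and bgl_lock_ptr() come from the header above.

#include <linux/spinlock.h>
#include <linux/blockgroup_lock.h>

/* Hypothetical per-mount state; real filesystems have their own type. */
struct my_sb_info {
        struct blockgroup_lock s_blockgroup_lock;       /* embedded */
        /* ... other per-superblock fields ... */
};

/* Hash a block group number to its spinlock, ext2/ext3-style. */
static inline spinlock_t *sb_bgl_lock(struct my_sb_info *sbi,
                                      unsigned int block_group)
{
        return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
}

static void my_fill_super(struct my_sb_info *sbi)
{
        bgl_lock_init(&sbi->s_blockgroup_lock);         /* once, at mount */
}

/* Serialize updates to one block group's allocation state. */
static void my_update_group(struct my_sb_info *sbi, unsigned int group)
{
        spin_lock(sb_bgl_lock(sbi, group));
        /* ... modify the group's free-block counters or bitmaps ... */
        spin_unlock(sb_bgl_lock(sbi, group));
}

Since different block groups usually map to different locks, allocations in separate groups can proceed in parallel, while the fixed NR_BG_LOCKS array bounds the memory cost no matter how many block groups the filesystem has.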