linux/fs/ext4/mballoc.h
/*
 *  fs/ext4/mballoc.h
 *
 *  Written by: Alex Tomas <alex@clusterfs.com>
 *
 */
#ifndef _EXT4_MBALLOC_H
#define _EXT4_MBALLOC_H

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "ext4_jbd2.h"
#include "ext4.h"

/*
 * With AGGRESSIVE_CHECK defined, the allocator runs consistency checks
 * over its structures. These checks slow things down a lot.
 */
#define AGGRESSIVE_CHECK__

/*
 * With DOUBLE_CHECK defined, mballoc creates persistent in-core
 * bitmaps, maintains them, and uses them to check for double
 * allocations.
 */
#define DOUBLE_CHECK__

/*
 */
#ifdef CONFIG_EXT4_DEBUG
extern ushort ext4_mballoc_debug;

#define mb_debug(n, fmt, a...)                                          \
        do {                                                            \
                if ((n) <= ext4_mballoc_debug) {                        \
                        printk(KERN_DEBUG "(%s, %d): %s: ",             \
                               __FILE__, __LINE__, __func__);           \
                        printk(fmt, ## a);                              \
                }                                                       \
        } while (0)
#else
#define mb_debug(n, fmt, a...)
#endif
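
/*
 * Illustrative use only (not part of the original header): with
 * CONFIG_EXT4_DEBUG enabled and the ext4_mballoc_debug level set to at
 * least the first argument, a call such as
 *
 *      mb_debug(1, "group %u: %d free clusters left\n", group, free);
 *
 * emits a KERN_DEBUG line prefixed with file, line and function name;
 * "group" and "free" here are hypothetical local variables.
 */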

#define EXT4_MB_HISTORY_ALLOC           1       /* allocation */
#define EXT4_MB_HISTORY_PREALLOC        2       /* preallocated blocks used */

/*
 * Maximum number of found extents mballoc will examine while looking
 * for the best extent.
 */
#define MB_DEFAULT_MAX_TO_SCAN          200

/*
 * Minimum number of extents mballoc must examine before it is allowed
 * to stop looking for the best extent.
 */
#define MB_DEFAULT_MIN_TO_SCAN          10

/*
 * With 'ext4_mb_stats' enabled, the allocator collects statistics that
 * are shown at umount time. Collecting them has a cost, though.
 */
#define MB_DEFAULT_STATS                0

/*
 * Files smaller than MB_DEFAULT_STREAM_THRESHOLD are served by the
 * stream allocator, whose purpose is to pack requests as close to each
 * other as possible in order to produce smooth I/O traffic. We use the
 * locality group prealloc space for stream requests. This can be tuned
 * via /proc/fs/ext4/<partition>/stream_req.
 */
#define MB_DEFAULT_STREAM_THRESHOLD     16      /* 64K */

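/*
 * Sanity check of the "64K" note above (illustration only), assuming
 * the common 4 KiB block size: 16 blocks * 4 KiB/block = 64 KiB. With
 * a different block size the threshold in bytes scales accordingly.
 */
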
/*
 * Minimum request order for which the allocator uses the 2^N buddy
 * search.
 */
#define MB_DEFAULT_ORDER2_REQS          2

/*
 * default group prealloc size 512 blocks
 */
#define MB_DEFAULT_GROUP_PREALLOC       512


struct ext4_free_data {
        /* MUST be the first member */
        struct ext4_journal_cb_entry    efd_jce;

        /* ext4_free_data private data starts from here */

        /* this links the free block information from group_info */
        struct rb_node                  efd_node;

        /* group to which the free block extent belongs */
        ext4_group_t                    efd_group;

        /* free block extent */
        ext4_grpblk_t                   efd_start_cluster;
        ext4_grpblk_t                   efd_count;

        /* transaction which freed this extent */
        tid_t                           efd_tid;
};

struct ext4_prealloc_space {
        struct list_head        pa_inode_list;
        struct list_head        pa_group_list;
        union {
                struct list_head pa_tmp_list;
                struct rcu_head pa_rcu;
        } u;
        spinlock_t              pa_lock;
        atomic_t                pa_count;
        unsigned                pa_deleted;
        ext4_fsblk_t            pa_pstart;      /* phys. block */
        ext4_lblk_t             pa_lstart;      /* log. block */
        ext4_grpblk_t           pa_len;         /* len of preallocated chunk */
        ext4_grpblk_t           pa_free;        /* how many blocks are free */
        unsigned short          pa_type;        /* pa type. inode or group */
        spinlock_t              *pa_obj_lock;
        struct inode            *pa_inode;      /* hack, for history only */
};

enum {
        MB_INODE_PA = 0,
        MB_GROUP_PA = 1
};

struct ext4_free_extent {
        ext4_lblk_t fe_logical;
        ext4_grpblk_t fe_start; /* In cluster units */
        ext4_group_t fe_group;
        ext4_grpblk_t fe_len;   /* In cluster units */
};

/*
 * Locality group:
 *   we try to group all related changes together
 *   so that writeback can flush/allocate them together as well
 *   Size of the lg_prealloc_list hash is determined by
 *   MB_DEFAULT_GROUP_PREALLOC (512). We store prealloc space in the
 *   hash based on the order of the pa_free block count, i.e.
 *   fls(pa_free) - 1.
 */
#define PREALLOC_TB_SIZE 10
struct ext4_locality_group {
        /* for allocator */
        /* to serialize allocates */
        struct mutex            lg_mutex;
        /* list of preallocations */
        struct list_head        lg_prealloc_list[PREALLOC_TB_SIZE];
        spinlock_t              lg_prealloc_lock;
};
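
/*
 * Worked example (illustration only, not part of the original header):
 * a locality-group preallocation with pa_free = 300 free clusters has
 * fls(300) = 9 (300 needs nine bits), so it is kept on
 * lg_prealloc_list[fls(300) - 1] = lg_prealloc_list[8]. Since the
 * default group prealloc is 512 = 2^9 clusters, fls(512) - 1 = 9 is
 * the largest possible index, which is why PREALLOC_TB_SIZE is 10.
 */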

struct ext4_allocation_context {
        struct inode *ac_inode;
        struct super_block *ac_sb;

        /* original request */
        struct ext4_free_extent ac_o_ex;

        /* goal request (normalized ac_o_ex) */
        struct ext4_free_extent ac_g_ex;

        /* the best found extent */
        struct ext4_free_extent ac_b_ex;

        /* copy of the best found extent taken before preallocation efforts */
        struct ext4_free_extent ac_f_ex;

        /* number of iterations done. we have to track to limit searching */
        unsigned long ac_ex_scanned;
        __u16 ac_groups_scanned;
        __u16 ac_found;
        __u16 ac_tail;
        __u16 ac_buddy;
        __u16 ac_flags;         /* allocation hints */
        __u8 ac_status;
        __u8 ac_criteria;
        __u8 ac_2order;         /* if request is to allocate 2^N blocks and
                                 * N > 0, the field stores N, otherwise 0 */
        __u8 ac_op;             /* operation, for history only */
        struct page *ac_bitmap_page;
        struct page *ac_buddy_page;
        struct ext4_prealloc_space *ac_pa;
        struct ext4_locality_group *ac_lg;
};

#define AC_STATUS_CONTINUE      1
#define AC_STATUS_FOUND         2
#define AC_STATUS_BREAK         3

struct ext4_buddy {
        struct page *bd_buddy_page;
        void *bd_buddy;
        struct page *bd_bitmap_page;
        void *bd_bitmap;
        struct ext4_group_info *bd_info;
        struct super_block *bd_sb;
        __u16 bd_blkbits;
        ext4_group_t bd_group;
};

static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
                                        struct ext4_free_extent *fex)
{
        return ext4_group_first_block_no(sb, fex->fe_group) +
                (fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
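
/*
 * Worked example (illustration only, not part of the original header),
 * assuming 4 KiB blocks with 32768 blocks per group, a first data
 * block of 0, and no bigalloc (s_cluster_bits == 0): for fe_group = 3
 * and fe_start = 100, ext4_group_first_block_no() returns
 * 3 * 32768 = 98304, so ext4_grp_offs_to_block() yields
 * 98304 + (100 << 0) = 98404. With bigalloc and s_cluster_bits = 4,
 * the same fe_start maps to 98304 + (100 << 4) = 99904.
 */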
#endif