linux/include/linux/buffer_head.h
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * IO completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks should be quiet */
        BH_Meta,        /* Buffer contains metadata */
        BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
        BH_Defer_Completion, /* Defer AIO completion to workqueue */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
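
/*
 * For example (an illustrative note, not part of the original header): with
 * 4 KiB pages and the minimum 512-byte block size, MAX_BUF_PER_PAGE is
 * 4096 / 512 = 8, i.e. a page carries at most eight buffer_heads.
 */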

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map;      /* mapping this buffer is
                                                   associated with */
        atomic_t b_count;               /* users using this buffer_head */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static __always_inline void set_buffer_##name(struct buffer_head *bh)   \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static __always_inline int buffer_##name(const struct buffer_head *bh)  \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

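/*
 * For illustration (not part of the original header): an invocation such as
 * BUFFER_FNS(Uptodate, uptodate) expands to
 *
 *      static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
 *      {
 *              set_bit(BH_Uptodate, &(bh)->b_state);
 *      }
 *
 * plus the matching clear_buffer_uptodate() and buffer_uptodate(), so each
 * line below emits a small family of atomic bit accessors for one state bit.
 */
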
/*
 * Emit the buffer bitops functions.   Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)

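/*
 * Typical use (an illustrative sketch, not part of the original header):
 * walk the circular b_this_page ring of a page known to carry buffers.
 *
 *      struct buffer_head *bh, *head;
 *
 *      if (page_has_buffers(page)) {
 *              bh = head = page_buffers(page);
 *              do {
 *                      ... inspect or update bh here ...
 *                      bh = bh->b_this_page;
 *              } while (bh != head);
 *      }
 */
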
void buffer_check_dirty_writeback(struct page *page,
                                     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
                                  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int _submit_bh(int op, int op_flags, struct buffer_head *bh,
               unsigned long bio_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

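/*
 * Typical read pattern (an illustrative sketch, not part of the original
 * header): bh_uptodate_or_lock() avoids I/O when the buffer already holds
 * valid data, and bh_submit_read() is only called with the buffer locked.
 *
 *      if (!bh_uptodate_or_lock(bh)) {
 *              if (bh_submit_read(bh))
 *                      return -EIO;
 *      }
 *      ... bh->b_data now contains valid data ...
 */
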
extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
                                unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err == -EAGAIN)
                return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
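
/*
 * Typical use from a filesystem's ->page_mkwrite() handler (an illustrative
 * sketch; "foo_page_mkwrite" and "foo_get_block" are hypothetical):
 *
 *      static int foo_page_mkwrite(struct vm_area_struct *vma,
 *                                  struct vm_fault *vmf)
 *      {
 *              int err = block_page_mkwrite(vma, vmf, foo_get_block);
 *
 *              return block_page_mkwrite_return(err);
 *      }
 */
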
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
                                struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
{
        get_page(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

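/*
 * Typical metadata read (an illustrative sketch; "blocknr" is a hypothetical
 * block number supplied by the caller):
 *
 *      struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *      if (!bh)
 *              return -EIO;
 *      ... use bh->b_data ...
 *      brelse(bh);
 *
 * brelse() drops the reference that sb_bread() took; forgetting it pins the
 * buffer_head (and its page) in memory.
 */
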
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
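
/*
 * Typical use from a get_block_t callback (an illustrative sketch;
 * "foo_get_block" and "foo_lookup_block" are hypothetical):
 *
 *      static int foo_get_block(struct inode *inode, sector_t iblock,
 *                               struct buffer_head *bh_result, int create)
 *      {
 *              sector_t phys = foo_lookup_block(inode, iblock);
 *
 *              if (phys)
 *                      map_bh(bh_result, inode->i_sb, phys);
 *              return 0;
 *      }
 */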

static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
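
/*
 * Typical synchronous read of a mapped buffer (an illustrative sketch,
 * not part of the original header): lock the buffer, submit the I/O with a
 * completion handler that unlocks it and drops the extra reference, then
 * wait for the lock to clear.
 *
 *      lock_buffer(bh);
 *      get_bh(bh);
 *      bh->b_end_io = end_buffer_read_sync;
 *      submit_bh(REQ_OP_READ, 0, bh);
 *      wait_on_buffer(bh);
 *      if (!buffer_uptodate(bh))
 *              return -EIO;
 */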

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
                                                   sector_t block,
                                                   unsigned size)
{
        return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
                                           sector_t block,
                                           unsigned size)
{
        return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */