linux/include/linux/iomap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE      0       /* no blocks allocated, need allocation */
#define IOMAP_DELALLOC  1       /* delayed allocation blocks */
#define IOMAP_MAPPED    2       /* blocks allocated at @addr */
#define IOMAP_UNWRITTEN 3       /* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE    4       /* data inline in the inode */
/*
 * Flags reported by the file system from iomap_begin:
 *
 * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
 * zeroing for areas to which no data is copied.
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit it to persistent storage.
 * This needs to take into account metadata changes that *may* be made at IO
 * completion, such as file size updates from direct IO.
 *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
 *
 * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
 * mappings.
 *
 * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
 * buffer heads for this mapping.
 */
#define IOMAP_F_NEW             0x01
#define IOMAP_F_DIRTY           0x02
#define IOMAP_F_SHARED          0x04
#define IOMAP_F_MERGED          0x08
#define IOMAP_F_BUFFER_HEAD     0x10
/*
 * Flags set by the core iomap code during operations:
 *
 * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
 * has changed as the result of this write operation.
 */
#define IOMAP_F_SIZE_CHANGED    0x100

/*
 * Flags from 0x1000 up are for file system specific usage:
 */
#define IOMAP_F_PRIVATE         0x1000


/*
 * Magic value for addr:
 */
#define IOMAP_NULL_ADDR -1ULL   /* addr is not valid */

struct iomap_page_ops;
struct iomap {
        u64                     addr; /* disk offset of mapping, bytes */
        loff_t                  offset; /* file offset of mapping, bytes */
        u64                     length; /* length of mapping, bytes */
        u16                     type;   /* type of mapping */
        u16                     flags;  /* flags for mapping */
        struct block_device     *bdev;  /* block device for I/O */
        struct dax_device       *dax_dev; /* dax_dev for dax operations */
        void                    *inline_data;
        void                    *private; /* filesystem private */
        const struct iomap_page_ops *page_ops;
};

static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
        return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

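#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example: given a mapping returned by ->iomap_begin, iomap_sector()
 * translates a byte position in the file into the 512-byte sector on
 * iomap->bdev that backs it.  The values below are hypothetical.
 */
static inline sector_t example_sector_for_pos(struct iomap *iomap)
{
        /*
         * For a mapping with addr = 1 MiB and offset = 4 KiB, file
         * position 8 KiB lives at disk byte 1 MiB + 4 KiB, i.e. sector
         * (0x100000 + 0x1000) >> 9 = 2056.
         */
        return iomap_sector(iomap, 8192);
}
#endif
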
/*
 * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
 * and page_done will be called for each page written to.  This only applies to
 * buffered writes as unbuffered writes will not typically have pages
 * associated with them.
 *
 * When page_prepare succeeds, page_done will always be called to do any
 * cleanup work necessary.  In that page_done call, @page will be NULL if the
 * associated page could not be obtained.
 */
struct iomap_page_ops {
        int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
                        struct iomap *iomap);
        void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
                        struct page *page, struct iomap *iomap);
};

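#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example page_ops for a hypothetical filesystem "foofs": page_prepare
 * reserves whatever per-write resources the filesystem needs before the
 * page is touched, and page_done releases them again.  All foofs_* names
 * are made up for illustration.
 */
static int foofs_page_prepare(struct inode *inode, loff_t pos, unsigned len,
                struct iomap *iomap)
{
        /* e.g. take a journal handle or a cluster lock; may fail */
        return foofs_begin_write(inode, pos, len);
}

static void foofs_page_done(struct inode *inode, loff_t pos, unsigned copied,
                struct page *page, struct iomap *iomap)
{
        /* @page may be NULL if the page could not be obtained */
        foofs_end_write(inode, pos, copied);
}

static const struct iomap_page_ops foofs_page_ops = {
        .page_prepare   = foofs_page_prepare,
        .page_done      = foofs_page_done,
};
#endif
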
/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE             (1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO              (1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT            (1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT             (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT            (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT            (1 << 5) /* do not block */
struct iomap_ops {
        /*
         * Return the existing mapping at pos, or reserve space starting at
         * pos for up to length, as long as we can do it as a single mapping.
         * The actual length is returned in iomap->length.
         */
        int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
                        unsigned flags, struct iomap *iomap,
                        struct iomap *srcmap);

        /*
         * Commit and/or unreserve space previously allocated using
         * iomap_begin.  Written indicates the length of the successful
         * write operation which needs to be committed, while the rest
         * needs to be unreserved.  Written might be zero if no data was
         * written.
         */
        int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
                        ssize_t written, unsigned flags, struct iomap *iomap);
};

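#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example iomap_ops for a hypothetical filesystem "foofs" whose files are
 * stored in a single contiguous on-disk extent starting at
 * foofs_inode_start() (a made-up helper).  ->iomap_begin fills in one
 * mapping covering the request; nothing needs to be committed afterwards,
 * so ->iomap_end can be omitted.  srcmap is only filled out for shared
 * (copy on write) mappings and is left untouched here.
 */
static int foofs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
        loff_t isize = i_size_read(inode);

        if (pos >= isize && !(flags & IOMAP_WRITE)) {
                /* reads beyond EOF map to a hole */
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_HOLE;
        } else {
                iomap->addr = foofs_inode_start(inode) + pos;
                iomap->type = IOMAP_MAPPED;
        }
        iomap->offset = pos;
        iomap->length = length;
        iomap->flags = 0;
        iomap->bdev = inode->i_sb->s_bdev;
        return 0;
}

static const struct iomap_ops foofs_iomap_ops = {
        .iomap_begin    = foofs_iomap_begin,
};
#endif
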
/*
 * Main iomap iterator function.
 */
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
                void *data, struct iomap *iomap, struct iomap *srcmap);

loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
                unsigned flags, const struct iomap_ops *ops, void *data,
                iomap_actor_t actor);

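#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example actor: count how many bytes of a range are backed by holes.
 * iomap_apply() calls ->iomap_begin, hands the resulting mapping to the
 * actor, then calls ->iomap_end; the actor returns how many bytes it
 * consumed (or a negative errno), and the caller loops until the whole
 * range has been covered.  Uses the made-up foofs_iomap_ops from above.
 */
static loff_t example_hole_actor(struct inode *inode, loff_t pos, loff_t len,
                void *data, struct iomap *iomap, struct iomap *srcmap)
{
        loff_t *hole_bytes = data;

        if (iomap->type == IOMAP_HOLE)
                *hole_bytes += len;
        return len;     /* consumed the whole mapping */
}

static loff_t example_count_holes(struct inode *inode, loff_t pos, loff_t len)
{
        loff_t hole_bytes = 0;
        loff_t ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
                                &foofs_iomap_ops, &hole_bytes,
                                example_hole_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                len -= ret;
        }
        return hole_bytes;
}
#endif
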
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
                const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
int iomap_readpages(struct address_space *mapping, struct list_head *pages,
                unsigned nr_pages, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
                unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
void iomap_invalidatepage(struct page *page, unsigned int offset,
                unsigned int len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
                struct page *page, enum migrate_mode mode);
#else
#define iomap_migrate_page NULL
#endif
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
                bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
                        const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                loff_t start, loff_t len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
                const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
                const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops);

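#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example: how the hypothetical "foofs" might wire the buffered I/O
 * helpers above into its address_space_operations (declared in
 * <linux/fs.h>, not by this header).  Several helpers can be used as
 * methods directly; readpage/readpages need thin wrappers to supply
 * the filesystem's iomap_ops.
 */
static int foofs_readpage(struct file *unused, struct page *page)
{
        return iomap_readpage(page, &foofs_iomap_ops);
}

static int foofs_readpages(struct file *unused, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        return iomap_readpages(mapping, pages, nr_pages, &foofs_iomap_ops);
}

static const struct address_space_operations foofs_aops = {
        .readpage               = foofs_readpage,
        .readpages              = foofs_readpages,
        .set_page_dirty         = iomap_set_page_dirty,
        .releasepage            = iomap_releasepage,
        .invalidatepage         = iomap_invalidatepage,
        .migratepage            = iomap_migrate_page,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
};
#endif
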
/*
 * Structure for writeback I/O completions.
 */
struct iomap_ioend {
        struct list_head        io_list;        /* next ioend in chain */
        u16                     io_type;
        u16                     io_flags;       /* IOMAP_F_* */
        struct inode            *io_inode;      /* file being written to */
        size_t                  io_size;        /* size of the extent */
        loff_t                  io_offset;      /* offset in the file */
        void                    *io_private;    /* file system private data */
        struct bio              *io_bio;        /* bio being built */
        struct bio              io_inline_bio;  /* MUST BE LAST! */
};

struct iomap_writeback_ops {
        /*
         * Required, maps the blocks so that writeback can be performed on
         * the range starting at offset.
         */
        int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
                                loff_t offset);

        /*
         * Optional, allows the file system to perform actions just before
         * submitting the bio and/or to override the bio end_io handler for
         * complex operations like copy on write extent manipulation or
         * unwritten extent conversions.
         */
        int (*prepare_ioend)(struct iomap_ioend *ioend, int status);

        /*
         * Optional, allows the file system to discard state on a page where
         * we failed to submit any I/O.
         */
        void (*discard_page)(struct page *page);
};

struct iomap_writepage_ctx {
        struct iomap            iomap;
        struct iomap_ioend      *ioend;
        const struct iomap_writeback_ops *ops;
};

void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
                struct list_head *more_ioends,
                void (*merge_private)(struct iomap_ioend *ioend,
                                struct iomap_ioend *next));
void iomap_sort_ioends(struct list_head *ioend_list);
int iomap_writepage(struct page *page, struct writeback_control *wbc,
                struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops);
int iomap_writepages(struct address_space *mapping,
                struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops);

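#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example: the hypothetical "foofs" driving writeback through iomap.
 * ->map_blocks fills wpc->iomap for the block at @offset (here by reusing
 * the made-up foofs_iomap_begin() from above), and ->writepages then
 * builds and submits the ioends.
 */
static int foofs_map_blocks(struct iomap_writepage_ctx *wpc,
                struct inode *inode, loff_t offset)
{
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;       /* current mapping still covers @offset */

        return foofs_iomap_begin(inode, offset, i_size_read(inode) - offset,
                        IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops foofs_writeback_ops = {
        .map_blocks     = foofs_map_blocks,
};

static int foofs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &foofs_writeback_ops);
}
#endif
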
/*
 * Flags for direct I/O ->end_io:
 */
#define IOMAP_DIO_UNWRITTEN     (1 << 0)        /* covers unwritten extent(s) */
#define IOMAP_DIO_COW           (1 << 1)        /* covers COW extent(s) */

struct iomap_dio_ops {
        int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
                      unsigned flags);
};

ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                bool wait_for_completion);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);

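#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example direct I/O completion for the hypothetical "foofs": convert
 * unwritten extents once the data has reached the disk, and call
 * iomap_dio_rw() from the write_iter method.  foofs_convert_unwritten()
 * is a made-up helper; locking and O_DIRECT checks are omitted.
 */
static int foofs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
                unsigned flags)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error || !size)
                return error;

        if (flags & IOMAP_DIO_UNWRITTEN)
                return foofs_convert_unwritten(inode, iocb->ki_pos, size);
        return 0;
}

static const struct iomap_dio_ops foofs_dio_ops = {
        .end_io         = foofs_dio_end_io,
};

static ssize_t foofs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
        return iomap_dio_rw(iocb, from, &foofs_iomap_ops, &foofs_dio_ops,
                        is_sync_kiocb(iocb));
}
#endif
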
#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *pagespan,
                const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)  (-EIO)
#endif /* CONFIG_SWAP */
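
#if 0   /* Illustrative sketch only, not part of this header. */
/*
 * Example: the hypothetical "foofs" exposing swapfile activation through
 * the ->swap_activate address_space operation from <linux/fs.h>.
 */
static int foofs_swap_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *span)
{
        return iomap_swapfile_activate(sis, swap_file, span, &foofs_iomap_ops);
}
#endif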

#endif /* LINUX_IOMAP_H */