linux/include/linux/device-mapper.h
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
        void *ptr;
        unsigned long long ll;
        unsigned target_request_nr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
                          union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
                                  union map_info *map_context);

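/*
 * An illustrative sketch (not part of this interface): a minimal bio-based
 * map function for a hypothetical linear-style target.  The "example_c"
 * context and its fields are assumptions for illustration only.  It remaps
 * the bio onto an underlying device and returns DM_MAPIO_REMAPPED (defined
 * near the bottom of this header) so dm core submits the remapped bio.
 */
#if 0  /* example only */
struct example_c {                      /* hypothetical per-target context */
        struct dm_dev *dev;             /* from dm_get_device() in the ctr */
        sector_t start;                 /* offset into that device */
};

static int example_map(struct dm_target *ti, struct bio *bio,
                       union map_info *map_context)
{
        struct example_c *ec = ti->private;

        /* Redirect the bio to the underlying device... */
        bio->bi_bdev = ec->dev->bdev;
        /* ...at the equivalent offset, via dm_target_offset() from below. */
        bio->bi_sector = ec->start + dm_target_offset(ti, bio->bi_sector);

        return DM_MAPIO_REMAPPED;       /* simple remap complete */
}
#endif
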
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g. the
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error,
                            union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                                    struct request *clone, int error,
                                    union map_info *map_context);

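/*
 * An illustrative sketch (an assumption, not dm code): an end_io hook that
 * asks dm core to requeue an io that failed with -EBUSY, using the
 * DM_ENDIO_REQUEUE return value defined at the bottom of this header, and
 * otherwise passes the result through unchanged.
 */
#if 0  /* example only */
static int example_end_io(struct dm_target *ti, struct bio *bio,
                          int error, union map_info *map_context)
{
        if (error == -EBUSY)
                return DM_ENDIO_REQUEUE;        /* retry the io later */

        return error;   /* 0 on success; < 0 passes the error on */
}
#endif
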
typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                             char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, sector_t len, void *data);

struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                                                 struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

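/*
 * An illustrative sketch of how a constructor and destructor might pair
 * dm_get_device() with dm_put_device().  The context layout (example_c,
 * from the earlier map sketch) and the argument format are hypothetical,
 * loosely modelled on a linear-style target; a real module would also
 * need <linux/slab.h> for kmalloc/kfree.
 */
#if 0  /* example only */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct example_c *ec;

        if (argc != 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        ec = kmalloc(sizeof(*ec), GFP_KERNEL);
        if (!ec) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }

        ec->start = simple_strtoull(argv[1], NULL, 10);

        /* Open the destination device with the table's mode. */
        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                          &ec->dev)) {
                ti->error = "Device lookup failed";
                kfree(ec);
                return -EINVAL;
        }

        ti->private = ec;
        return 0;
}

static void example_dtr(struct dm_target *ti)
{
        struct example_c *ec = ti->private;

        dm_put_device(ti, ec->dev);     /* drop the reference taken in ctr */
        kfree(ec);
}
#endif
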
/*
 * Information about a target type
 */

/*
 * Target features
 */

struct target_type {
        uint64_t features;
        const char *name;
        struct module *module;
        unsigned version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_map_request_fn map_rq;
        dm_endio_fn end_io;
        dm_request_endio_fn rq_end_io;
        dm_flush_fn flush;
        dm_presuspend_fn presuspend;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_merge_fn merge;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;

        /* For internal device-mapper use. */
        struct list_head list;
};

struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* Always a power of 2 */
        sector_t split_io;

        /*
         * The number of zero-length barrier requests that will be submitted
         * to the target for the purpose of flushing cache.
         *
         * The request number will be placed in union map_info->target_request_nr.
         * It is the responsibility of the target driver to remap these requests
         * to the real underlying devices.
         */
        unsigned num_flush_requests;

        /*
         * The number of discard requests that will be submitted to the
         * target.  map_info->target_request_nr is used just as it is for
         * flush requests.
         */
        unsigned num_discard_requests;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;

        /*
         * Set if this target needs to receive discards regardless of
         * whether or not its underlying devices have support.
         */
        unsigned discards_supported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
        struct list_head list;
        int (*congested_fn) (struct dm_target_callbacks *, int);
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

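/*
 * An illustrative sketch of registering a target type from module init.
 * All names here are hypothetical; note that real targets also #define
 * DM_MSG_PREFIX before using the DM* logging macros defined below.
 */
#if 0  /* example only */
static struct target_type example_target = {
        .name    = "example",
        .version = {1, 0, 0},
        .module  = THIS_MODULE,
        .ctr     = example_ctr,
        .dtr     = example_dtr,
        .map     = example_map,
};

static int __init dm_example_init(void)
{
        int r = dm_register_target(&example_target);

        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_example_exit(void)
{
        dm_unregister_target(&example_target);
}

module_init(dm_example_init);
module_exit(dm_example_exit);
#endif
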
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

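/*
 * An illustrative sketch of the create / add / complete sequence described
 * above, for an in-kernel caller that already holds a mapped_device.  The
 * function name, target line and device path are purely hypothetical.
 */
#if 0  /* example only */
static int example_build_table(struct mapped_device *md,
                               struct dm_table **result)
{
        struct dm_table *t;
        int r;

        /* First create an empty read/write table sized for one target. */
        r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
        if (r)
                return r;

        /* Then add each target: type, start sector, length, params. */
        r = dm_table_add_target(t, "linear", 0, 1024, "/dev/sdb 0");

        /* Finally make the table ready for use. */
        if (!r)
                r = dm_table_complete(t);

        /* (Error-path teardown of a half-built table is elided here.) */
        if (!r)
                *result = t;

        return r;
}
#endif
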
/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                               struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DMCRIT(f, arg...) \
        printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMWARN(f, arg...) \
        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMINFO(f, arg...) \
        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
                          0 : scnprintf(result + sz, maxlen - sz, x))

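/*
 * An illustrative sketch of a status routine built on DMEMIT().  The macro
 * expands against local variables named sz, result and maxlen, so the
 * status function must provide them.  The context is the hypothetical
 * example_c from the earlier sketches.
 */
#if 0  /* example only */
static int example_status(struct dm_target *ti, status_type_t type,
                          char *result, unsigned int maxlen)
{
        struct example_c *ec = ti->private;
        int sz = 0;             /* DMEMIT() accumulates into 'sz' */

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';       /* no runtime status to report */
                break;

        case STATUSTYPE_TABLE:
                /* Echo the constructor arguments back to userspace. */
                DMEMIT("%s %llu", ec->dev->name,
                       (unsigned long long)ec->start);
                break;
        }

        return 0;
}
#endif
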
#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE     1
#define DM_ENDIO_REQUEUE        2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED      0
#define DM_MAPIO_REMAPPED       1
#define DM_MAPIO_REQUEUE        DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

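/*
 * For example, dm_div_up(1000, 512) evaluates (1000 + 511) / 512, which
 * truncates to 2, so dm_round_up(1000, 512) is 2 * 512 = 1024: the
 * smallest multiple of 512 that covers 1000.
 */
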
#define dm_array_too_big(fixed, obj, num) \
        ((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif  /* _LINUX_DEVICE_MAPPER_H */