linux/include/linux/device-mapper.h
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
        void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
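
/*
 * Illustrative sketch only (not part of this header): a minimal bio-based
 * map function for a hypothetical pass-through target.  It assumes
 * ti->private points at a hypothetical struct my_target holding a single
 * struct dm_dev *dev.
 *
 *	static int passthrough_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_target *mt = ti->private;
 *
 *		bio->bi_bdev = mt->dev->bdev;
 *		bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;	// simple remap complete
 *	}
 */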
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
                                  union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
                                            struct request *rq,
                                            union map_info *map_context,
                                            struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                                    struct request *clone, int error,
                                    union map_info *map_context);
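
/*
 * Illustrative sketch only: a bio end_io hook for a hypothetical target
 * that asks the core to requeue I/O that failed with -EIO and completes
 * everything else.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
 *	{
 *		if (error == -EIO)
 *			return DM_ENDIO_REQUEUE;	// push the io back for retry
 *		return 0;				// ended successfully
 *	}
 */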

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                              unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
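
/*
 * Illustrative sketch only: a callout, as a hypothetical target's
 * iterate_devices implementation might use it, reporting whether an
 * underlying device supports discards.
 *
 *	static int device_supports_discard(struct dm_target *ti, struct dm_dev *dev,
 *					   sector_t start, sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && blk_queue_discard(q);
 *	}
 */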

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
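
/*
 * Illustrative sketch only: typical use in a constructor, assuming argv[0]
 * is the underlying device path and "mt" is hypothetical target-private data.
 *
 *	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &mt->dev)) {
 *		ti->error = "Device lookup failed";
 *		return -EINVAL;
 *	}
 *	...
 *	// and in the destructor:
 *	dm_put_device(ti, mt->dev);
 */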

/*
 * Information about a target type
 */

struct target_type {
        uint64_t features;
        const char *name;
        struct module *module;
        unsigned version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_map_request_fn map_rq;
        dm_clone_and_map_request_fn clone_and_map_rq;
        dm_release_clone_request_fn release_clone_rq;
        dm_endio_fn end_io;
        dm_request_endio_fn rq_end_io;
        dm_presuspend_fn presuspend;
        dm_presuspend_undo_fn presuspend_undo;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;

        /* For internal device-mapper use. */
        struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON             0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE      0x00000002
#define dm_target_always_writeable(type) \
                ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE             0x00000004
#define dm_target_is_immutable(type)    ((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* If non-zero, maximum size of I/O submitted to a target. */
        uint32_t max_io_len;

        /*
         * The number of zero-length barrier bios that will be submitted
         * to the target for the purpose of flushing cache.
         *
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         * It is the responsibility of the target driver to remap these bios
         * to the real underlying devices.
         */
        unsigned num_flush_bios;

        /*
         * The number of discard bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_discard_bios;

        /*
         * The number of WRITE SAME bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_write_same_bios;

        /*
         * The minimum number of extra bytes allocated in each bio for the
         * target to use.  dm_per_bio_data returns the data location.
         */
        unsigned per_bio_data_size;

        /*
         * If defined, this function is called to find out how many
         * duplicate bios should be sent to the target when writing
         * data.
         */
        dm_num_write_bios_fn num_write_bios;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;

        /*
         * Set if this target needs to receive flushes regardless of
         * whether or not its underlying devices have support.
         */
        bool flush_supported:1;

        /*
         * Set if this target needs to receive discards regardless of
         * whether or not its underlying devices have support.
         */
        bool discards_supported:1;

        /*
         * Set if the target requires discard bios to be split
         * on the max_io_len boundary.
         */
        bool split_discard_bios:1;

        /*
         * Set if this target does not return zeroes on discarded blocks.
         */
        bool discard_zeroes_data_unsupported:1;
};
 275
 276/* Each target can link one of these into the table */
 277struct dm_target_callbacks {
 278        struct list_head list;
 279        int (*congested_fn) (struct dm_target_callbacks *, int);
 280};
 281
 282/*
 283 * For bio-based dm.
 284 * One of these is allocated for each bio.
 285 * This structure shouldn't be touched directly by target drivers.
 286 * It is here so that we can inline dm_per_bio_data and
 287 * dm_bio_from_per_bio_data
 288 */
 289struct dm_target_io {
 290        struct dm_io *io;
 291        struct dm_target *ti;
 292        unsigned target_bio_nr;
 293        unsigned *len_ptr;
 294        struct bio clone;
 295};
 296
 297static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
 298{
 299        return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
 300}
 301
 302static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
 303{
 304        return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
 305}
 306
 307static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
 308{
 309        return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
 310}
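
/*
 * Illustrative sketch only: if a hypothetical target sets
 * ti->per_bio_data_size = sizeof(struct per_bio_data) in its constructor,
 * its map and end_io hooks can retrieve that area from a bio like this:
 *
 *	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 */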

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
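
/*
 * Illustrative sketch only: a minimal, hypothetical target type and its
 * module hooks.  The name, version and hook functions are examples, not
 * part of the kernel API.
 *
 *	static struct target_type passthrough_target = {
 *		.name    = "passthrough",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = passthrough_ctr,
 *		.dtr     = passthrough_dtr,
 *		.map     = passthrough_map,
 *	};
 *
 *	static int __init dm_passthrough_init(void)
 *	{
 *		return dm_register_target(&passthrough_target);
 *	}
 *
 *	static void __exit dm_passthrough_exit(void)
 *	{
 *		dm_unregister_target(&passthrough_target);
 *	}
 */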

/*
 * Target argument parsing.
 */
struct dm_arg_set {
        unsigned argc;
        char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
        unsigned min;
        unsigned max;
        char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
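
/*
 * Illustrative sketch only: parsing an optional feature-argument count and
 * then consuming that many feature words inside a constructor.  The bounds
 * and error string are hypothetical.
 *
 *	static struct dm_arg _args[] = {
 *		{0, 4, "Invalid number of feature arguments"},
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	unsigned num_features;
 *	const char *word;
 *	int r;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		word = dm_shift_arg(&as);
 *		// interpret "word" ...
 *	}
 */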

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
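
/*
 * Illustrative sketch only: the create/add/complete sequence described
 * above (normally driven by the device-mapper ioctl interface rather than
 * by targets).  The target string and parameters are hypothetical.
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb 0";
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */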

/*
 * A target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
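
/*
 * Illustrative sketch only: reading the live table under its SRCU reference.
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// ... inspect the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */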

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                               struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()  __ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()  0
#endif

#define DMCRIT(f, arg...) \
        printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit())     \
                        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMWARN(f, arg...) \
        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit())     \
                        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMINFO(f, arg...) \
        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit())     \
                        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
        do { \
                if (dm_ratelimit())     \
                        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
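
/*
 * The logging macros above expand DM_MSG_PREFIX at their point of use, so
 * each target source file is expected to define it before invoking them,
 * e.g. (illustrative name only):
 *
 *	#define DM_MSG_PREFIX "passthrough"
 *	...
 *	DMERR("failed to open %s", path);
 */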

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
                          0 : scnprintf(result + sz, maxlen - sz, x))
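
/*
 * Illustrative sketch only: DMEMIT assumes local variables named "sz",
 * "result" and "maxlen", which is why status functions conventionally
 * begin like this (hypothetical target; some_counter and some_device_name
 * are placeholders):
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result, unsigned maxlen)
 *	{
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			DMEMIT("%u", some_counter);
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", some_device_name);
 *			break;
 *		}
 *	}
 */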

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE     1
#define DM_ENDIO_REQUEUE        2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED      0
#define DM_MAPIO_REMAPPED       1
#define DM_MAPIO_REQUEUE        DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
        u64 _res; \
        (x) = div64_u64_rem(x, y, &_res); \
        _res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
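
/*
 * For example, dm_div_up(1000, 512) == 2 and dm_round_up(1000, 512) == 1024,
 * i.e. dm_round_up() yields the smallest multiple of sz that is >= n.
 */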

#define dm_array_too_big(fixed, obj, num) \
        ((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}

#endif  /* _LINUX_DEVICE_MAPPER_H */