linux/drivers/md/dm-core.h
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <linux/keyslot-manager.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS             1024

struct dm_kobject_holder {
        struct kobject kobj;
        struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!  (An illustrative access sketch follows the
 * struct mapped_device definition below.)
 */

struct mapped_device {
        struct mutex suspend_lock;

        struct mutex table_devices_lock;
        struct list_head table_devices;

        /*
         * The current mapping (struct dm_table *).
         * Use dm_get_live_table{_fast} or take suspend_lock for
         * dereference.
         */
        void __rcu *map;

        unsigned long flags;

        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;
        enum dm_queue_mode type;

        int numa_node_id;
        struct request_queue *queue;

        atomic_t holders;
        atomic_t open_count;

        struct dm_target *immutable_target;
        struct target_type *immutable_target_type;

        char name[16];
        struct gendisk *disk;
        struct dax_device *dax_dev;

        /*
         * A list of ios that arrived while we were suspended.
         */
        struct work_struct work;
        wait_queue_head_t wait;
        spinlock_t deferred_lock;
        struct bio_list deferred;

        void *interface_ptr;

        /*
         * Event handling.
         */
        wait_queue_head_t eventq;
        atomic_t event_nr;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /* the number of internal suspends */
        unsigned internal_suspend_count;

        /*
         * io objects are allocated from here.
         */
        struct bio_set io_bs;
        struct bio_set bs;

        /*
         * Processing queue (flush)
         */
        struct workqueue_struct *wq;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* kobject and completion */
        struct dm_kobject_holder kobj_holder;

        int swap_bios;
        struct semaphore swap_bios_semaphore;
        struct mutex swap_bios_lock;

        struct dm_stats stats;

        /* for blk-mq request-based DM support */
        struct blk_mq_tag_set *tag_set;
        bool init_tio_pdu:1;

        struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
        unsigned int nr_zones;
        unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
        struct dm_ima_measurements ima;
#endif
};
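
/*
 * Illustrative sketch only (nothing here is defined by this header): rather
 * than dereferencing md->map directly, core code and targets read the live
 * table under SRCU via the helpers declared in linux/device-mapper.h:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect the table through dm_table_* helpers only ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */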

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

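/*
 * Sketch of typical flag manipulation (illustrative; these are just the
 * standard atomic bitops on md->flags, as used throughout dm.c):
 *
 *	set_bit(DMF_FREEING, &md->flags);
 *	if (test_bit(DMF_SUSPENDED, &md->flags))
 *		...
 *	clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
 */
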
void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
        return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
        return &md->stats;
}

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
        if (blk_queue_is_zoned(md->queue))
                return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
        return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
        struct mapped_device *md;
        enum dm_queue_mode type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
        sector_t *index[DM_TABLE_MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        struct target_type *immutable_target_type;

        bool integrity_supported:1;
        bool singleton:1;
        unsigned integrity_added:1;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct blk_keyslot_manager *ksm;
#endif
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
        unsigned int magic;
        struct dm_io *io;
        struct dm_target *ti;
        unsigned int target_bio_nr;
        unsigned int *len_ptr;
        bool inside_dm_io;
        struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
        unsigned int magic;
        struct mapped_device *md;
        blk_status_t status;
        atomic_t io_count;
        struct bio *orig_bio;
        unsigned long start_time;
        spinlock_t endio_lock;
        struct dm_stats_aux stats_aux;
        /* last member of dm_target_io is 'struct bio' */
        struct dm_target_io tio;
};
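
/*
 * Illustrative sketch only: because 'struct bio clone' is the last member of
 * dm_target_io, and dm_target_io is in turn the last member of dm_io, dm.c
 * can recover the per-clone and per-original state from a clone bio allocated
 * out of these bio_sets with container_of(), roughly:
 *
 *	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 *	struct dm_io *io = tio->io;
 */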

static inline void dm_io_inc_pending(struct dm_io *io)
{
        atomic_inc(&io->io_count);
}

void dm_io_dec_pending(struct dm_io *io, blk_status_t error);
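
/*
 * Sketch of the pairing (illustrative, mirroring how dm.c drives clone
 * completion; not a new interface):
 *
 *	dm_io_inc_pending(io);		before a clone is handed to the target
 *	...
 *	dm_io_dec_pending(io, error);	from the clone's completion path; the
 *					original bio is ended once io_count
 *					drops to zero
 */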

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
        return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
        return !maxlen || strlen(result) + 1 >= maxlen;
}
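
/*
 * Illustrative use only ('result', 'maxlen', 'sz' and 'value' stand in for a
 * message handler's usual output-buffer bookkeeping): append with scnprintf()
 * and stop emitting once the buffer would overflow.
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%llu ", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		break;
 */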

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif