linux/drivers/md/dm-core.h
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

/*
 * Pairs a kobject with a completion that is signalled when the kobject is
 * released, so the owner can wait for sysfs teardown to finish.
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference (see the illustrative sketch after this struct).
	 */
	void __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended
	 * (see the deferred-bio sketch after this struct).
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;
};
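
/*
 * Illustrative sketch only (not part of the original header): how a caller
 * is expected to dereference md->map through the SRCU-protected accessors
 * declared in <linux/device-mapper.h> instead of touching the __rcu pointer
 * directly.  The name dm_example_live_table_size() is hypothetical.
 */
static inline sector_t dm_example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);	/* pins the live table under SRCU */
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);	/* drops the SRCU read-side reference */

	return size;
}

/*
 * Illustrative sketch only (assumption, not taken from the original header):
 * a plausible way the deferred bio list, deferred_lock and the work item fit
 * together - a bio that arrives while the device is suspended is parked on
 * md->deferred and the workqueue is kicked to requeue it later.  The name
 * dm_example_defer_bio() is hypothetical.
 */
static inline void dm_example_defer_bio(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);

	queue_work(md->wq, &md->work);	/* md->work processes the deferred list */
}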

void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
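
/*
 * Illustrative sketch only (assumption, not part of the original header):
 * the natural user of dm_get_completion_from_kobject() is a kobject release
 * callback that signals the embedded completion, so whoever called
 * kobject_put() can wait_for_completion() until sysfs has really let go.
 * The name dm_example_kobject_release() is hypothetical.
 */
static inline void dm_example_kobject_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}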

/*
 * Read a module parameter, substituting 'def' when it is zero and clamping
 * it to 'max'.
 */
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
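
/*
 * Illustrative sketch only (assumption, not part of the original header):
 * how a wrapper in DM core might use __dm_get_module_param() to bound a
 * reserved-IO tunable.  The name dm_example_reserved_ios() and the default
 * value 16 are hypothetical; DM_RESERVED_MAX_IOS is defined above.
 */
static inline unsigned dm_example_reserved_ios(unsigned *reserved_ios_param)
{
	return __dm_get_module_param(reserved_ios_param, 16, DM_RESERVED_MAX_IOS);
}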

/*
 * Returns true if the string built in 'result', including its terminating
 * NUL, has filled a 'maxlen'-byte buffer, i.e. the reply was likely truncated.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
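
/*
 * Illustrative sketch only (assumption, not part of the original header):
 * a plausible status/message handler that appends to 'result' and then uses
 * the helper above to detect truncation.  dm_example_emit_status() and the
 * emitted fields are hypothetical.
 */
static inline void dm_example_emit_status(char *result, unsigned maxlen)
{
	unsigned sz = 0;

	sz += scnprintf(result + sz, maxlen - sz, "errors %u blocks %u", 0, 128);
	if (dm_message_test_buffer_overflow(result, maxlen))
		DMWARN("status output truncated");
}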

#endif