linux/drivers/block/null_blk/null_blk.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct nullb_cmd {
        struct request *rq;     /* request being serviced (blk-mq mode) */
        struct bio *bio;        /* bio being serviced (bio queue mode) */
        unsigned int tag;
        blk_status_t error;     /* completion status returned to the block layer */
        struct nullb_queue *nq; /* owning queue */
        struct hrtimer timer;   /* defers completion in timer IRQ mode */
        bool fake_timeout;      /* skip completion to emulate a timed-out command */
};
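
/*
 * Editor's illustration, not part of the original header: when irqmode is
 * set to the timer mode, cmd->timer is used to defer completion by
 * completion_nsec. A minimal expiry callback could look like the sketch
 * below. The name null_cmd_timer_sketch() is hypothetical and the sketch
 * assumes a blk-mq queue mode, where cmd->rq is valid.
 */
static inline enum hrtimer_restart null_cmd_timer_sketch(struct hrtimer *timer)
{
        struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);

        /* Complete the request with whatever status the command recorded. */
        blk_mq_end_request(cmd->rq, cmd->error);
        return HRTIMER_NORESTART;
}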

struct nullb_queue {
        unsigned long *tag_map;         /* command tag bitmap (bio queue mode) */
        wait_queue_head_t wait;         /* waiters for a free tag */
        unsigned int queue_depth;
        struct nullb_device *dev;
        unsigned int requeue_selection;

        struct list_head poll_list;     /* commands awaiting blk_poll() completion */
        spinlock_t poll_lock;           /* protects poll_list */

        struct nullb_cmd *cmds;         /* per-queue command array (bio queue mode) */
};

struct nullb_zone {
        /*
         * Zone lock to prevent concurrent modification of a zone write
         * pointer position and condition: with memory backing, a write
         * command execution may sleep on memory allocation. For this case,
         * use mutex as the zone lock. Otherwise, use the spinlock for
         * locking the zone.
         */
        union {
                spinlock_t spinlock;
                struct mutex mutex;
        };
        enum blk_zone_type type;        /* conventional or sequential write required */
        enum blk_zone_cond cond;        /* current zone condition (empty, open, full, ...) */
        sector_t start;                 /* first sector of the zone */
        sector_t wp;                    /* current write pointer position */
        unsigned int len;               /* zone size in sectors */
        unsigned int capacity;          /* usable capacity in sectors (<= len) */
};
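
/*
 * Editor's illustration, not part of the original header: how the lock
 * union above is meant to be used. A write to a memory-backed device may
 * sleep while allocating backing pages, so the mutex must be taken there;
 * otherwise the spinlock suffices. The helper name null_zone_lock_sketch()
 * is hypothetical; the real zone locking lives in the zoned-device code.
 */
static inline void null_zone_lock_sketch(struct nullb_zone *zone,
                                         bool memory_backed)
{
        if (memory_backed)
                mutex_lock(&zone->mutex);       /* may sleep */
        else
                spin_lock_irq(&zone->spinlock);
}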

struct nullb_device {
        struct nullb *nullb;
        struct config_item item;
        struct radix_tree_root data; /* data stored in the disk */
        struct radix_tree_root cache; /* disk cache data */
        unsigned long flags; /* device flags */
        unsigned int curr_cache;
        struct badblocks badblocks;

        unsigned int nr_zones;
        unsigned int nr_zones_imp_open;
        unsigned int nr_zones_exp_open;
        unsigned int nr_zones_closed;
        unsigned int imp_close_zone_no;
        struct nullb_zone *zones;
        sector_t zone_size_sects;
        bool need_zone_res_mgmt;
        spinlock_t zone_res_lock;

        unsigned long size; /* device size in MB */
        unsigned long completion_nsec; /* time in ns to complete a request */
        unsigned long cache_size; /* disk cache size in MB */
        unsigned long zone_size; /* zone size in MB if device is zoned */
        unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
        unsigned int zone_nr_conv; /* number of conventional zones */
        unsigned int zone_max_open; /* max number of open zones */
        unsigned int zone_max_active; /* max number of active zones */
        unsigned int submit_queues; /* number of submission queues */
        unsigned int prev_submit_queues; /* number of submission queues before change */
        unsigned int poll_queues; /* number of IOPOLL submission queues */
        unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
        unsigned int home_node; /* home node for the device */
        unsigned int queue_mode; /* block interface */
        unsigned int blocksize; /* block size */
        unsigned int max_sectors; /* Max sectors per command */
        unsigned int irqmode; /* IRQ completion handler */
        unsigned int hw_queue_depth; /* queue depth */
        unsigned int index; /* index of the disk, only valid with a disk */
        unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
        bool blocking; /* blocking blk-mq device */
        bool use_per_node_hctx; /* use per-node allocation for hardware context */
        bool power; /* power on/off the device */
        bool memory_backed; /* if data is stored in memory */
        bool discard; /* if discard is supported */
        bool zoned; /* if device is zoned */
        bool virt_boundary; /* virtual boundary on/off for the device */
};

struct nullb {
        struct nullb_device *dev;
        struct list_head list;
        unsigned int index;
        struct request_queue *q;
        struct gendisk *disk;
        struct blk_mq_tag_set *tag_set;
        struct blk_mq_tag_set __tag_set;
        unsigned int queue_depth;
        atomic_long_t cur_bytes;        /* remaining byte budget for bandwidth throttling */
        struct hrtimer bw_timer;        /* periodically refills cur_bytes when mbps is set */
        unsigned long cache_flush_pos;
        spinlock_t lock;

        struct nullb_queue *queues;
        unsigned int nr_queues;
        char disk_name[DISK_NAME_LEN];
};

blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
                                 sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
                              enum req_opf op, sector_t sector,
                              unsigned int nr_sectors);

#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
                      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
                                    enum req_opf op, sector_t sector,
                                    sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
                                sector_t sector, unsigned int len);
#else
static inline int null_init_zoned_dev(struct nullb_device *dev,
                                      struct request_queue *q)
{
        pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
        return -EINVAL;
}
static inline int null_register_zoned_dev(struct nullb *nullb)
{
        return -ENODEV;
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
                        enum req_opf op, sector_t sector, sector_t nr_sectors)
{
        return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
                                              sector_t sector,
                                              unsigned int len)
{
        return len;
}
#define null_report_zones       NULL
#endif /* CONFIG_BLK_DEV_ZONED */
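
/*
 * Editor's illustration, not part of the original header: the expected
 * split between the regular and the zoned command path. The dispatcher
 * name null_dispatch_cmd_sketch() is hypothetical; the real dispatch is
 * done by the driver core, not in this header.
 */
static inline blk_status_t null_dispatch_cmd_sketch(struct nullb_cmd *cmd,
                                                    enum req_opf op,
                                                    sector_t sector,
                                                    sector_t nr_sectors)
{
        if (cmd->nq->dev->zoned)
                return null_process_zoned_cmd(cmd, op, sector, nr_sectors);
        return null_process_cmd(cmd, op, sector, nr_sectors);
}
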
#endif /* __BLK_NULL_BLK_H */