linux/drivers/nvme/host/nvme.h
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
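
/*
 * Illustrative note (era-specific, assumes the status is kept in
 * req->errors): because NVME_SC_CANCELLED is negative, completion code
 * can tell a driver-cancelled request from a genuine device error with a
 * simple sign test, e.g. "if (req->errors < 0)" for a cancelled request
 * versus "else if (req->errors)" for a real NVMe status code.
 */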

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
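
/*
 * The timeout module parameters above are given in seconds; multiplying
 * by HZ converts them to jiffies, the unit the block layer expects (e.g.
 * for blk_queue_rq_timeout()).
 */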

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern unsigned int nvme_max_retries;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify CNS values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
};
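
/*
 * Illustrative sketch (assumed convention; device IDs for example only):
 * PCIe devices pick up quirks through the driver_data field of their
 * pci_device_id table entry, e.g.:
 *
 *	{ PCI_VDEVICE(INTEL, 0x0953),
 *		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
 *
 * and the core then tests them with "ctrl->quirks & NVME_QUIRK_...".
 */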

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
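
/*
 * Illustrative sketch (hypothetical "my_transport_iod", not part of this
 * header): a transport embeds struct nvme_request as the first member of
 * its blk-mq pdu so that nvme_req() above can recover it:
 *
 *	struct my_transport_iod {
 *		struct nvme_request	req;	(must be the first member)
 *		... transport-specific fields ...
 *	};
 */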

/*
 * Delay (in ms) required before checking device readiness for
 * PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled.  The value was found
 * empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};
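
/*
 * Illustrative sketch (assumption about typical use): transports funnel
 * state transitions through nvme_change_ctrl_state(), declared below,
 * which rejects invalid transitions, e.g. at the start of a reset:
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return -EBUSY;
 */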

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u16 oncs;
	u16 vid;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
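
/*
 * Illustrative sketch (hypothetical "my_*" callbacks, condensed): a
 * PCIe-style transport supplies MMIO-backed register accessors:
 *
 *	static const struct nvme_ctrl_ops my_pci_ctrl_ops = {
 *		.name			= "pcie",
 *		.module			= THIS_MODULE,
 *		.reg_read32		= my_reg_read32,
 *		.reg_write32		= my_reg_write32,
 *		.reg_read64		= my_reg_read64,
 *		.reset_ctrl		= my_reset_ctrl,
 *		.free_ctrl		= my_free_ctrl,
 *		.submit_async_event	= my_submit_async_event,
 *	};
 */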

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

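/*
 * Writing 0x4E564D65 (ASCII "NVMe") to the NSSR register requests an NVM
 * subsystem reset, as defined by the NVMe specification.
 */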
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

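/*
 * Convert a 512-byte linear sector number into a namespace LBA; e.g. for
 * a 4096-byte LBA format (lba_shift == 12), sector 8 maps to block 1.
 */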
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

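/*
 * Free the payload buffer the command setup path allocates for requests
 * marked RQF_SPECIAL_PAYLOAD (e.g. the DSM range built for a discard).
 */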
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

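/*
 * Retry only if the device did not set the Do Not Retry bit, the block
 * layer did not mark the request no-retry, the request's timeout budget
 * is not yet exhausted, and the retry cap has not been reached.
 */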
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout &&
		req->retries < nvme_max_retries;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
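
/*
 * Illustrative sketch (assumed typical usage, not from this header):
 * callers zero-initialize a command on the stack, fill it in, and issue
 * it synchronously, e.g. on the admin queue:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	(fill in the remaining identify fields)
 *	error = nvme_submit_sync_cmd(dev->admin_q, &c, id, sizeof(*id));
 */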
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */