1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _LINUX_NVME_H
16#define _LINUX_NVME_H
17
18#include <uapi/linux/nvme.h>
19#include <linux/pci.h>
20#include <linux/kref.h>
21#include <linux/blk-mq.h>
22
/*
 * Memory-mapped controller register layout (NVMe BAR 0).
 * Field order and widths give the standard NVMe register offsets.
 */
struct nvme_bar {
	__u64 cap;	/* 0x00: Controller Capabilities */
	__u32 vs;	/* 0x08: Version */
	__u32 intms;	/* 0x0c: Interrupt Mask Set */
	__u32 intmc;	/* 0x10: Interrupt Mask Clear */
	__u32 cc;	/* 0x14: Controller Configuration */
	__u32 rsvd1;	/* 0x18: reserved */
	__u32 csts;	/* 0x1c: Controller Status */
	__u32 rsvd2;	/* 0x20: reserved */
	__u32 aqa;	/* 0x24: Admin Queue Attributes */
	__u64 asq;	/* 0x28: Admin Submission Queue base address */
	__u64 acq;	/* 0x30: Admin Completion Queue base address */
};
36
/* Extract fields from the 64-bit CAP (Controller Capabilities) register. */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* max queue entries supported (0's based) */
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)	/* worst-case ready timeout, 500ms units */
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)	/* doorbell stride, as 2^(2 + DSTRD) bytes */
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)	/* min memory page size, as 2^(12 + MPSMIN) */
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)	/* max memory page size, as 2^(12 + MPSMAX) */
42
/* Bit definitions for the CC (Controller Configuration) and CSTS
 * (Controller Status) registers. */
enum {
	NVME_CC_ENABLE		= 1 << 0,	/* EN: enable the controller */
	NVME_CC_CSS_NVM		= 0 << 4,	/* CSS: select the NVM command set */
	NVME_CC_MPS_SHIFT	= 7,		/* MPS: memory page size field position */
	NVME_CC_ARB_RR		= 0 << 11,	/* AMS: round-robin arbitration */
	NVME_CC_ARB_WRRU	= 1 << 11,	/* AMS: weighted round-robin w/ urgent */
	NVME_CC_ARB_VS		= 7 << 11,	/* AMS: vendor-specific arbitration */
	NVME_CC_SHN_NONE	= 0 << 14,	/* SHN: no shutdown notification */
	NVME_CC_SHN_NORMAL	= 1 << 14,	/* SHN: normal shutdown */
	NVME_CC_SHN_ABRUPT	= 2 << 14,	/* SHN: abrupt shutdown */
	NVME_CC_SHN_MASK	= 3 << 14,	/* mask covering the SHN field */
	NVME_CC_IOSQES		= 6 << 16,	/* IOSQES: SQ entry size 2^6 = 64 bytes */
	NVME_CC_IOCQES		= 4 << 20,	/* IOCQES: CQ entry size 2^4 = 16 bytes */
	NVME_CSTS_RDY		= 1 << 0,	/* RDY: controller is ready */
	NVME_CSTS_CFS		= 1 << 1,	/* CFS: controller fatal status */
	NVME_CSTS_SHST_NORMAL	= 0 << 2,	/* SHST: no shutdown in progress */
	NVME_CSTS_SHST_OCCUR	= 1 << 2,	/* SHST: shutdown occurring */
	NVME_CSTS_SHST_CMPLT	= 2 << 2,	/* SHST: shutdown complete */
	NVME_CSTS_SHST_MASK	= 3 << 2,	/* mask covering the SHST field */
};
63
/* I/O command timeout in seconds (presumably a module parameter — defined
 * in the core driver); NVME_IO_TIMEOUT converts it to jiffies via HZ. */
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
66
67
68
69
/*
 * Per-controller driver state: one instance for each NVMe PCI function.
 * Holds the register mapping, queue bookkeeping, DMA pools and cached
 * identify-controller data.
 */
struct nvme_dev {
	struct list_head node;		/* entry in the global device list */
	struct nvme_queue **queues;	/* admin + I/O queue pointers, indexed by qid */
	struct request_queue *admin_q;	/* blk-mq queue for admin commands */
	struct blk_mq_tag_set tagset;		/* tag set shared by the I/O queues */
	struct blk_mq_tag_set admin_tagset;	/* tag set for the admin queue */
	u32 __iomem *dbs;		/* mapped doorbell registers */
	struct pci_dev *pci_dev;	/* underlying PCI function */
	struct dma_pool *prp_page_pool;	/* pool for full-page PRP lists */
	struct dma_pool *prp_small_pool;/* pool for small PRP lists */
	int instance;			/* device number used in "nvmeN" naming */
	unsigned queue_count;		/* queues allocated (including admin) */
	unsigned online_queues;		/* queues currently usable */
	unsigned max_qid;		/* highest I/O queue id supported */
	int q_depth;			/* entries per queue */
	u32 db_stride;			/* doorbell stride (from CAP.DSTRD) */
	u32 ctrl_config;		/* shadow of the CC register value */
	struct msix_entry *entry;	/* MSI-X vector table */
	struct nvme_bar __iomem *bar;	/* mapped controller registers */
	struct list_head namespaces;	/* list of this controller's nvme_ns */
	struct kref kref;		/* reference count for the device */
	struct device *device;		/* char/sysfs device for this controller */
	work_func_t reset_workfn;	/* function run by reset_work */
	struct work_struct reset_work;	/* deferred controller reset */
	struct work_struct probe_work;	/* deferred probe/initialization */
	char name[12];			/* "nvmeN" device name */
	char serial[20];		/* serial number from Identify Controller */
	char model[40];			/* model number from Identify Controller */
	char firmware_rev[8];		/* firmware revision from Identify Controller */
	u32 max_hw_sectors;		/* transfer size limit, in 512B sectors */
	u32 stripe_size;		/* optimal I/O boundary, 0 if none */
	u32 page_size;			/* controller memory page size in bytes */
	u16 oncs;			/* optional NVM command support flags */
	u16 abort_limit;		/* max concurrent Abort commands */
	u8 event_limit;			/* max outstanding async event requests */
	u8 vwc;				/* volatile write cache presence flags */
};
107
108
109
110
/*
 * An NVMe namespace: one block device ("nvmeNnM") exposed by a controller.
 */
struct nvme_ns {
	struct list_head list;		/* entry in nvme_dev->namespaces */

	struct nvme_dev *dev;		/* owning controller */
	struct request_queue *queue;	/* blk-mq request queue */
	struct gendisk *disk;		/* block-layer disk for this namespace */

	unsigned ns_id;			/* namespace id used in commands */
	int lba_shift;			/* log2 of the logical block size */
	int ms;				/* metadata bytes per logical block */
	int pi_type;			/* end-to-end protection information type */
	u64 mode_select_num_blocks;	/* capacity cached for SCSI translation */
	u32 mode_select_block_len;	/* block length cached for SCSI translation */
};
125
126
127
128
129
130
131
132struct nvme_iod {
133 unsigned long private;
134 int npages;
135 int offset;
136 int nents;
137 int length;
138 dma_addr_t first_dma;
139 struct scatterlist meta_sg[1];
140 struct scatterlist sg[0];
141};
142
143static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
144{
145 return (sector >> (ns->lba_shift - 9));
146}
147
148
149
150
151
152
153void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
154
155int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
156struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
157 unsigned long addr, unsigned length);
158void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
159 struct nvme_iod *iod);
160int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
161 struct nvme_command *, u32 *);
162int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
163int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
164 u32 *result);
165int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
166 dma_addr_t dma_addr);
167int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
168 dma_addr_t dma_addr, u32 *result);
169int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
170 dma_addr_t dma_addr, u32 *result);
171
struct sg_io_hdr;

/* SCSI-to-NVMe translation layer: SG_IO ioctl entry points (32-bit compat
 * variant takes the raw ioctl argument). */
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);
177
178#endif
179