1
2#ifndef __NITROX_DEV_H
3#define __NITROX_DEV_H
4
5#include <linux/dma-mapping.h>
6#include <linux/interrupt.h>
7#include <linux/pci.h>
8#include <linux/if.h>
9
#define VERSION_LEN 32		/* max length of the firmware version string (see nitrox_hw.fw_name) */

#define MAX_PF_QUEUES 64	/* maximum number of PF command queues */
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/**
 * struct nitrox_cmdq - NITROX command queue
 * @cmd_qlock: command queue lock
 * @resp_qlock: response queue lock
 * @backlog_qlock: backlog queue lock
 * @ndev: NITROX device this queue belongs to
 * @response_head: list of submitted requests awaiting completion
 * @backlog_head: list of backlogged requests
 * @dbell_csr_addr: doorbell register address for this queue
 * @compl_cnt_csr_addr: completion count register address for this queue
 * @base: aligned base address of the queue memory
 * @dma: DMA address corresponding to @base
 * @backlog_qflush: work used to flush the backlog queue
 * @pending_count: number of requests pending at the device
 * @backlog_count: number of requests in the backlog
 * @write_idx: next write index into the queue
 * @instr_size: size of a single command instruction
 * @qno: command queue number
 * @qsize: command queue size in bytes
 * @unalign_base: original (unaligned) allocation base
 * @unalign_dma: DMA address of the unaligned allocation
 */
struct nitrox_cmdq {
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	atomic_t pending_count;
	atomic_t backlog_count;

	int write_idx;
	u8 instr_size;
	u8 qno;
	u32 qsize;

	u8 *unalign_base;
	dma_addr_t unalign_dma;
};
62
63
64
65
66
67
68
69
70
71
72
73
74
/**
 * struct nitrox_hw - NITROX hardware information
 * @partname: hardware part name
 * @fw_name: firmware version string
 * @freq: core frequency
 * @vendor_id: PCI vendor id
 * @device_id: PCI device id
 * @revision_id: PCI revision id
 * @se_cores: number of SE cores
 * @ae_cores: number of AE cores
 * @zip_cores: number of ZIP (compression) cores
 */
struct nitrox_hw {
	char partname[IFNAMSIZ * 2];
	char fw_name[VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};
88
/**
 * struct nitrox_stats - request statistics, updated atomically
 * @posted: number of requests posted to the device
 * @completed: number of requests completed
 * @dropped: number of requests dropped
 */
struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};
94
#define IRQ_NAMESZ 32	/* max length of an IRQ name, including NUL */

/**
 * struct nitrox_q_vector - per interrupt vector context
 * @name: IRQ name for this vector
 * @valid: vector is initialized and usable (NOTE(review): inferred from
 *	name — confirm against the interrupt setup code)
 * @ring: ring number serviced by this vector
 * @resp_tasklet: tasklet handling response processing
 * @cmdq: command queue associated with the vector (one union view)
 * @ndev: NITROX device (alternate union view)
 */
struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};
107
108
109
110
111
112
113
114union mbox_msg {
115 u64 value;
116 struct {
117 u64 type: 2;
118 u64 opcode: 6;
119 u64 data: 58;
120 };
121 struct {
122 u64 type: 2;
123 u64 opcode: 6;
124 u64 chipid: 8;
125 u64 vfid: 8;
126 } id;
127};
128
129
130
131
132
133
134
135
136
137
/**
 * struct nitrox_vfdev - per-VF device state, as tracked by the PF
 * @state: VF device state (holds enum ndev_state values)
 * @vfno: VF number
 * @nr_queues: number of queues assigned to the VF
 * @ring: ring used by this VF
 * @msg: last mailbox message exchanged with the VF
 * @mbx_resp: mailbox response value (NOTE(review): exact semantics not
 *	visible here — confirm against the mailbox code)
 */
struct nitrox_vfdev {
	atomic_t state;
	int vfno;
	int nr_queues;
	int ring;
	union mbox_msg msg;
	atomic64_t mbx_resp;
};
146
147
148
149
150
151
152
153
154
/**
 * struct nitrox_iov - SR-IOV information
 * @num_vfs: number of enabled VFs
 * @max_vf_queues: maximum queues available per VF
 * @vfdev: array of per-VF device structures
 * @pf2vf_wq: PF to VF mailbox workqueue
 * @msix: MSI-X entry used while SR-IOV is enabled
 */
struct nitrox_iov {
	int num_vfs;
	int max_vf_queues;
	struct nitrox_vfdev *vfdev;
	struct workqueue_struct *pf2vf_wq;
	struct msix_entry msix;
};
162
163
164
165
/* device lifecycle states, stored in the atomic 'state' fields */
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};
171
172
/* SR-IOV modes: PF only, or 16/32/64/128 virtual functions */
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};
180
/* bit in nitrox_device.flags marking SR-IOV state (inferred from name — confirm at usage) */
#define __NDEV_SRIOV_BIT 0

/* default number of entries per command queue */
#define DEFAULT_CMD_QLEN 2048

/* command timeout; presumably milliseconds — TODO confirm at the wait sites */
#define CMD_TIMEOUT 2000

/* struct device of the underlying PCI device */
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))

/* address of the CSR at @offset within the device's BAR mapping */
#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
 * struct nitrox_device - NITROX device instance
 * @list: entry in the list of NITROX devices
 * @bar_addr: iomapped BAR address
 * @pdev: PCI device
 * @state: device state (holds enum ndev_state values)
 * @flags: device flags (e.g. bit __NDEV_SRIOV_BIT)
 * @timeout: request timeout (NOTE(review): units not visible here —
 *	confirm against the submission path)
 * @refcnt: device usage count
 * @idx: device index
 * @node: NUMA node the device is attached to
 * @qlen: command queue length
 * @nr_queues: number of command queues
 * @mode: current VF mode
 * @ctx_pool: DMA pool for crypto contexts
 * @pkt_inq: packet input (command) queues
 * @qvec: interrupt queue vectors
 * @iov: SR-IOV information
 * @num_vecs: number of interrupt vectors
 * @stats: request statistics
 * @hw: hardware information
 * @debugfs_dir: debugfs directory (only with CONFIG_DEBUG_FS)
 */
struct nitrox_device {
	struct list_head list;

	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

	u8 idx;
	int node;
	u16 qlen;
	u16 nr_queues;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_inq;

	struct nitrox_q_vector *qvec;
	struct nitrox_iov iov;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
#endif
};
246
247
248
249
250
251
252
253
254static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
255{
256 return readq(ndev->bar_addr + offset);
257}
258
259
260
261
262
263
264
265static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
266 u64 value)
267{
268 writeq(value, (ndev->bar_addr + offset));
269}
270
271static inline bool nitrox_ready(struct nitrox_device *ndev)
272{
273 return atomic_read(&ndev->state) == __NDEV_READY;
274}
275
276static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
277{
278 return atomic_read(&vfdev->state) == __NDEV_READY;
279}
280
281#endif
282