1
2#ifndef __NITROX_DEV_H
3#define __NITROX_DEV_H
4
5#include <linux/dma-mapping.h>
6#include <linux/interrupt.h>
7#include <linux/pci.h>
8#include <linux/if.h>
9
/* Length of a firmware version string in struct nitrox_hw */
#define VERSION_LEN 32

/* Maximum hardware queues usable by the physical function */
#define MAX_PF_QUEUES 64

/* Maximum queues per device; currently bounded by the PF limit */
#define MAX_DEV_QUEUES (MAX_PF_QUEUES)

/* Number of microcode (UCD) blocks on CNN55XX parts — TODO confirm against ucode loader */
#define CNN55XX_MAX_UCD_BLOCKS 8
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/**
 * struct nitrox_cmdq - per-ring command queue state
 * @cmd_qlock: lock protecting command submission to the ring
 * @resp_qlock: lock protecting @response_head
 * @backlog_qlock: lock protecting @backlog_head
 * @ndev: owning NITROX device
 * @response_head: list of submitted commands awaiting completion
 * @backlog_head: list of commands held back (flushed by @backlog_qflush)
 * @dbell_csr_addr: mapped doorbell CSR address for this queue
 * @compl_cnt_csr_addr: mapped completion-count CSR address
 * @base: aligned virtual base address of the ring
 * @dma: aligned DMA address of the ring
 * @backlog_qflush: work item that drains @backlog_head
 * @pending_count: number of commands currently in flight
 * @backlog_count: number of commands parked on @backlog_head
 * @write_idx: next ring entry index to write
 * @instr_size: size of one instruction entry, in bytes
 * @qno: hardware queue number
 * @qsize: ring size, in bytes
 * @unalign_base: raw (unaligned) virtual address of the allocation
 * @unalign_dma: raw (unaligned) DMA address of the allocation
 *
 * NOTE(review): member semantics inferred from names and types; confirm
 * against the queue setup/teardown code before relying on the details.
 */
struct nitrox_cmdq {
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	atomic_t pending_count;
	atomic_t backlog_count;

	int write_idx;
	u8 instr_size;
	u8 qno;
	u32 qsize;

	u8 *unalign_base;
	dma_addr_t unalign_dma;
};
66
67
68
69
70
71
72
73
74
75
76
77
78
/**
 * struct nitrox_hw - NITROX hardware identification and capabilities
 * @partname: hardware part name string
 * @fw_name: firmware version string per UCD block
 * @freq: core frequency (units not visible here — presumably MHz, confirm)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @revision_id: PCI revision ID
 * @se_cores: number of SE cores (presumably symmetric engines — confirm)
 * @ae_cores: number of AE cores (presumably asymmetric engines — confirm)
 * @zip_cores: number of compression cores
 */
struct nitrox_hw {
	char partname[IFNAMSIZ * 2];
	char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};
92
/**
 * struct nitrox_stats - device-wide request counters
 * @posted: requests posted to the hardware
 * @completed: requests completed by the hardware
 * @dropped: requests dropped
 */
struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};
98
/* Length of an interrupt vector name buffer */
#define IRQ_NAMESZ 32

/**
 * struct nitrox_q_vector - per-interrupt-vector context
 * @name: irq name shown in /proc/interrupts
 * @valid: true once the vector is set up and usable
 * @ring: ring number serviced by this vector
 * @resp_tasklet: bottom half for response processing
 * @cmdq: command queue bound to this vector (one union member is
 *        active depending on the vector's role — confirm at setup)
 * @ndev: device bound to this vector (alternative union member)
 */
struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};
111
112
113
114
115
116
117
/**
 * union mbox_msg - PF <-> VF mailbox message format
 * @value: raw 64-bit message word
 * @type: message type (2 bits)
 * @opcode: message opcode (6 bits)
 * @data: opcode-specific payload (58 bits)
 * @id: identity view of the same word, carrying @id.chipid and
 *      @id.vfid in place of the generic payload
 *
 * The bitfield layout is the on-wire mailbox format — do not reorder.
 */
union mbox_msg {
	u64 value;
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 data: 58;
	};
	struct {
		u64 type: 2;
		u64 opcode: 6;
		u64 chipid: 8;
		u64 vfid: 8;
	} id;
};
132
133
134
135
136
137
138
139
140
141
/**
 * struct nitrox_vfdev - per-virtual-function state kept by the PF
 * @state: VF device state (compared against enum ndev_state values)
 * @vfno: virtual function number
 * @nr_queues: number of queues assigned to this VF
 * @ring: ring number used by this VF
 * @msg: last mailbox message exchanged with this VF
 * @mbx_resp: mailbox response value — exact protocol not visible here
 */
struct nitrox_vfdev {
	atomic_t state;
	int vfno;
	int nr_queues;
	int ring;
	union mbox_msg msg;
	atomic64_t mbx_resp;
};
150
151
152
153
154
155
156
157
158
/**
 * struct nitrox_iov - SR-IOV bookkeeping for the PF
 * @num_vfs: number of VFs currently enabled
 * @max_vf_queues: maximum queues available per VF
 * @vfdev: array of per-VF state (presumably @num_vfs entries — confirm)
 * @pf2vf_wq: workqueue servicing PF-to-VF mailbox work
 * @msix: MSI-X entry used for PF <-> VF mailbox interrupts
 */
struct nitrox_iov {
	int num_vfs;
	int max_vf_queues;
	struct nitrox_vfdev *vfdev;
	struct workqueue_struct *pf2vf_wq;
	struct msix_entry msix;
};
166
167
168
169
/* Device lifecycle states stored in nitrox_device.state / nitrox_vfdev.state */
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};
175
176
/* SR-IOV mode: PF-only, or the number of VFs the device is split into */
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};
184
/* Bit in nitrox_device.flags: SR-IOV is enabled */
#define __NDEV_SRIOV_BIT 0

/* Default number of command queue entries */
#define DEFAULT_CMD_QLEN 2048

/* Command timeout — units not visible here (presumably ms; confirm at use site) */
#define CMD_TIMEOUT 2000

/* Generic struct device pointer for a NITROX device */
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))

/* Mapped address of a CSR at @offset within the device BAR */
#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
/**
 * struct nitrox_device - NITROX physical-function device state
 * @list: linkage in the driver's global device list
 * @bar_addr: mapped BAR base for CSR access
 * @pdev: underlying PCI device
 * @state: device state (enum ndev_state)
 * @flags: device flag bits (e.g. __NDEV_SRIOV_BIT)
 * @timeout: timeout value — exact use not visible in this header
 * @refcnt: device reference count
 * @idx: device index
 * @node: NUMA node of the device
 * @qlen: command queue length, in entries
 * @nr_queues: number of queues in use
 * @mode: current SR-IOV mode (enum vf_mode)
 * @ctx_pool: DMA pool for crypto context allocations
 * @pkt_inq: packet input command queues
 * @aqmq: AQM command queues, one per device queue
 * @qvec: interrupt vector contexts
 * @iov: SR-IOV state
 * @num_vecs: number of allocated interrupt vectors
 * @stats: request counters
 * @hw: hardware identification and capabilities
 * @debugfs_dir: debugfs directory for this device
 */
struct nitrox_device {
	struct list_head list;

	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

	u8 idx;
	int node;
	u16 qlen;
	u16 nr_queues;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_inq;
	struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;

	struct nitrox_q_vector *qvec;
	struct nitrox_iov iov;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
#endif
};
252
253
254
255
256
257
258
259
260static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
261{
262 return readq(ndev->bar_addr + offset);
263}
264
265
266
267
268
269
270
271static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
272 u64 value)
273{
274 writeq(value, (ndev->bar_addr + offset));
275}
276
277static inline bool nitrox_ready(struct nitrox_device *ndev)
278{
279 return atomic_read(&ndev->state) == __NDEV_READY;
280}
281
282static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
283{
284 return atomic_read(&vfdev->state) == __NDEV_READY;
285}
286
287#endif
288