#ifndef __DPAA2_QDMA_H__
#define __DPAA2_QDMA_H__

struct qdma_sdd;
struct rte_qdma_job;

/** Maximum number of frame list entries per job */
#define DPAA2_QDMA_MAX_FLE 3
/** Number of source/destination descriptors per job */
#define DPAA2_QDMA_MAX_SDD 2

/** Maximum number of scatter-gather entries per direction per job */
#define DPAA2_QDMA_MAX_SG_NB 64

/** Maximum number of queue pairs per DPDMAI object */
#define DPAA2_DPDMAI_MAX_QUEUES 8

/** Single job FLE pool element size: job pointer (uint64_t) +
 * 3 frame list entries + 2 source/destination descriptors.
 */
#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

/** SG job FLE pool element size: job number (uint64_t) +
 * 3 frame list entries + 2 source/destination descriptors +
 * 64 source + 64 destination SG entries + 64 job pointers.
 */
#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)

/* Byte offsets of each region within one FLE pool element */
#define QDMA_FLE_JOB_NB_OFFSET 0

#define QDMA_FLE_SINGLE_JOB_OFFSET 0

#define QDMA_FLE_FLE_OFFSET \
		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))

#define QDMA_FLE_SDD_OFFSET \
		(QDMA_FLE_FLE_OFFSET + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)

#define QDMA_FLE_SG_ENTRY_OFFSET \
		(QDMA_FLE_SDD_OFFSET + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

#define QDMA_FLE_SG_JOBS_OFFSET \
		(QDMA_FLE_SG_ENTRY_OFFSET + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
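
/*
 * Illustrative sketch only (hypothetical helpers, not part of the
 * driver API): how the offsets above locate the per-job regions
 * inside a single element carved from the FLE mempool.
 */
static inline struct qbman_fle *
qdma_elem_fle(void *elem)
{
	/* frame list entries start right after the leading uint64_t */
	return (struct qbman_fle *)((uint8_t *)elem + QDMA_FLE_FLE_OFFSET);
}

static inline struct qdma_sdd *
qdma_elem_sdd(void *elem)
{
	/* source/destination descriptors follow the frame list */
	return (struct qdma_sdd *)((uint8_t *)elem + QDMA_FLE_SDD_OFFSET);
}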

/** FLE pool per-lcore cache size, derived from the total element count */
#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))
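
/*
 * Typical use (illustrative, assumed call shape rather than the
 * driver's exact code): size the per-lcore mempool cache from the
 * element count when creating the FLE pool, e.g.
 *
 *	rte_mempool_create(name, cnt, QDMA_FLE_SG_POOL_SIZE,
 *			QDMA_FLE_CACHE_SIZE(cnt), 0,
 *			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 */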

/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)
#define DPAA2_RBP_MEM_RW 0x0

/**
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE 0xb
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE 0x7

/**
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE 0x6
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE 0xb

/** Maximum possible H/W queues on each core */
#define MAX_HW_QUEUE_PER_CORE 64

#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)

/**
 * Represents a QDMA device.
 * A single QDMA device exists which is a combination of multiple
 * DPDMAI rawdevs.
 */
struct qdma_device {
	/** Total number of hw queues. */
	uint16_t num_hw_queues;
	/**
	 * Maximum number of hw queues to be allocated per core.
	 * This is limited by MAX_HW_QUEUE_PER_CORE.
	 */
	uint16_t max_hw_queues_per_core;

	/** VQs of this device */
	struct qdma_virt_queue *vqs;
	/** Maximum number of VQs */
	uint16_t max_vqs;
	/** Device state - started or stopped */
	uint8_t state;
	/** Number of elements in the FLE pool */
	int fle_queue_pool_cnt;
	/** A lock to serialize access whenever required */
	rte_spinlock_t lock;
};

/** Represents a QDMA H/W queue */
struct qdma_hw_queue {
	/** Pointer to the next instance */
	TAILQ_ENTRY(qdma_hw_queue) next;
	/** DPDMAI device used to communicate with HW */
	struct dpaa2_dpdmai_dev *dpdmai_dev;
	/** Queue ID used to communicate with HW */
	uint16_t queue_id;
	/** Associated lcore id */
	uint32_t lcore_id;
	/** Number of users of this hw queue */
	uint32_t num_users;
};

struct qdma_virt_queue;

/** Extract completed jobs from a dequeued FD */
typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
					const struct qbman_fd *fd,
					struct rte_qdma_job **job,
					uint16_t *nb_jobs);
/** Build an FD for enqueue from one or more jobs */
typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
					struct qbman_fd *fd,
					struct rte_qdma_job **job,
					uint16_t nb_jobs);

/** Dequeue up to nb_jobs completed jobs from a virtual queue */
typedef int (qdma_dequeue_multijob_t)(
				struct qdma_virt_queue *qdma_vq,
				uint16_t *vq_id,
				struct rte_qdma_job **job,
				uint16_t nb_jobs);

/** Enqueue up to nb_jobs jobs to a virtual queue */
typedef int (qdma_enqueue_multijob_t)(
				struct qdma_virt_queue *qdma_vq,
				struct rte_qdma_job **job,
				uint16_t nb_jobs);

/** Represents a QDMA virtual queue */
struct qdma_virt_queue {
	/** Status ring of the virtual queue */
	struct rte_ring *status_ring;
	/** Associated hw queue */
	struct qdma_hw_queue *hw_queue;
	/** FLE pool for the queue */
	struct rte_mempool *fle_pool;
	/** Route by port */
	struct rte_qdma_rbp rbp;
	/** Associated lcore id */
	uint32_t lcore_id;
	/** States if this vq is in use or not */
	uint8_t in_use;
	/** States if this vq has exclusively associated hw queue */
	uint8_t exclusive_hw_queue;
	/** Number of enqueues on this VQ */
	uint64_t num_enqueues;
	/** Number of dequeues on this VQ */
	uint64_t num_dequeues;

	uint16_t vq_id;
	uint32_t flags;

	qdma_set_fd_t *set_fd;
	qdma_get_job_t *get_job;

	qdma_dequeue_multijob_t *dequeue_job;
	qdma_enqueue_multijob_t *enqueue_job;
};
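
/*
 * Illustrative sketch only (hypothetical wrapper, not part of the
 * driver API): the function pointers above let the fast path dispatch
 * to the handler selected at queue-setup time (single vs. SG format,
 * exclusive vs. shared hw queue) without rechecking the queue mode.
 */
static inline int
qdma_vq_enqueue_sketch(struct qdma_virt_queue *vq,
		struct rte_qdma_job **job, uint16_t nb_jobs)
{
	return vq->enqueue_job(vq, job, nb_jobs);
}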

/** Per-core hw queue allocation info */
struct qdma_per_core_info {
	/** HW queues allocated to this core */
	struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
	/** Number of hw queues allocated to this core */
	uint16_t num_hw_queues;
};

/** Source/Destination Descriptor */
struct qdma_sdd {
	uint32_t rsv;
	/** Stride configuration */
	uint32_t stride;
	/** Route-by-port command */
	union {
		uint32_t rbpcmd;
		struct rbpcmd_st {
			uint32_t vfid:6;
			uint32_t rsv4:2;
			uint32_t pfid:1;
			uint32_t rsv3:7;
			uint32_t attr:3;
			uint32_t rsv2:1;
			uint32_t at:2;
			uint32_t vfa:1;
			uint32_t ca:1;
			uint32_t tc:3;
			uint32_t rsv1:5;
		} rbpcmd_simple;
	};
	union {
		uint32_t cmd;
		/** Read command, used in the source descriptor */
		struct rcmd_simple {
			uint32_t portid:4;
			uint32_t rsv1:14;
			uint32_t rbp:1;
			uint32_t ssen:1;
			uint32_t rthrotl:4;
			uint32_t sqos:3;
			uint32_t ns:1;
			uint32_t rdtype:4;
		} read_cmd;
		/** Write command, used in the destination descriptor */
		struct wcmd_simple {
			uint32_t portid:4;
			uint32_t rsv3:10;
			uint32_t rsv2:2;
			uint32_t lwc:2;
			uint32_t rbp:1;
			uint32_t dsen:1;
			uint32_t rsv1:4;
			uint32_t dqos:3;
			uint32_t ns:1;
			uint32_t wrttype:4;
		} write_cmd;
	};
} __rte_packed;

#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
#define QDMA_SG_SL_SHORT 0x1 /* short length format */
#define QDMA_SG_SL_LONG 0x0 /* long length format */
#define QDMA_SG_F 0x1 /* last SG entry */
#define QDMA_SG_BMT_ENABLE 0x1
#define QDMA_SG_BMT_DISABLE 0x0

struct qdma_sg_entry {
	uint32_t addr_lo;	/* address 0:31 */
	uint32_t addr_hi:17;	/* address 32:48 */
	uint32_t rsv:15;
	union {
		uint32_t data_len_sl0;	/* SL=0, the long format */
		struct {
			uint32_t len:17;	/* SL=1, the short format */
			uint32_t reserve:3;
			uint32_t sf:1;
			uint32_t sr:1;
			uint32_t size:10;	/* buffer size */
		} data_len_sl1;
	} data_len;
	union {
		uint32_t ctrl_fields;
		struct {
			uint32_t bpid:14;
			uint32_t ivp:1;
			uint32_t bmt:1;
			uint32_t offset:12;
			uint32_t fmt:2;
			uint32_t sl:1;
			uint32_t f:1;
		} ctrl;
	};
} __rte_packed;
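
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver API): filling one scatter-gather entry for a buffer at IOVA
 * `addr` of `len` bytes, using the long (SL=0) length format.
 */
static inline void
qdma_sg_entry_fill(struct qdma_sg_entry *sge, uint64_t addr,
		uint32_t len, int is_last)
{
	sge->addr_lo = (uint32_t)addr;
	sge->addr_hi = (addr >> 32) & 0x1ffff;	/* bits 32:48 */
	sge->rsv = 0;
	sge->data_len.data_len_sl0 = len;
	sge->ctrl_fields = 0;
	sge->ctrl.sl = QDMA_SG_SL_LONG;
	sge->ctrl.fmt = QDMA_SG_FMT_SDB;
	sge->ctrl.f = is_last ? QDMA_SG_F : 0;
}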

/** Represents a DPDMAI raw device */
struct dpaa2_dpdmai_dev {
	/** Pointer to the next device instance */
	TAILQ_ENTRY(dpaa2_qdma_device) next;
	/** Handle to the DPDMAI object */
	struct fsl_mc_io dpdmai;
	/** HW ID of the DPDMAI object */
	uint32_t dpdmai_id;
	/** Token of this device */
	uint16_t token;
	/** Number of queue pairs in this DPDMAI device */
	uint8_t num_queues;
	/** RX queues */
	struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** TX queues */
	struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** Associated QDMA device */
	struct qdma_device *qdma_dev;
};

/** Get the QDMA device backing a virtual queue */
static inline struct qdma_device *
QDMA_DEV_OF_VQ(struct qdma_virt_queue *vq)
{
	return vq->hw_queue->dpdmai_dev->qdma_dev;
}

#endif /* __DPAA2_QDMA_H__ */