1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef _QED_SP_H
34#define _QED_SP_H
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/list.h>
39#include <linux/slab.h>
40#include <linux/spinlock.h>
41#include <linux/qed/qed_chain.h>
42#include "qed.h"
43#include "qed_hsi.h"
44
/* How completion of a slow-path request is delivered to the requester. */
enum spq_mode {
	QED_SPQ_MODE_BLOCK,	/* Requester polls a completion address */
	QED_SPQ_MODE_CB,	/* Requester supplies a callback (qed_spq_comp_cb) */
	QED_SPQ_MODE_EBLOCK,	/* QED itself blocks until the completion arrives */
};
50
/**
 * struct qed_spq_comp_cb - Completion callback for a slow-path request.
 * @function: invoked on completion with the hwfn, @cookie, the event-ring
 *            data of the completing event, and the FW return code.
 * @cookie: opaque caller context handed back to @function.
 */
struct qed_spq_comp_cb {
	void (*function)(struct qed_hwfn *,
			 void *,
			 union event_ring_data *,
			 u8 fw_return_code);
	void *cookie;
};
58
59
60
61
62
63
64
65
66
67
/**
 * qed_eth_cqe_completion(): Handle the completion of a slow-path ramrod
 *                           reported on the Rx CQE ring.
 * @p_hwfn: HW function.
 * @cqe: the slow-path Rx CQE carrying the completion.
 *
 * Return: int (0 on success by kernel convention — confirm in implementation).
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);
70
71
72
73
74
75
76
/* Per-ramrod payload carried by a slow-path entry.  Exactly one member is
 * valid for any given request; the union is sized by its largest member.
 * Do not reorder/remove members — this is part of the FW (HSI) interface.
 */
union ramrod_data {
	/* L2 / core (light-L2) ramrods */
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct vport_update_ramrod_data vport_update;
	struct core_rx_start_ramrod_data core_rx_queue_start;
	struct core_rx_stop_ramrod_data core_rx_queue_stop;
	struct core_tx_start_ramrod_data core_tx_queue_start;
	struct core_tx_stop_ramrod_data core_tx_queue_stop;
	struct vport_filter_update_ramrod_data vport_filter_update;

	/* RDMA / RoCE ramrods */
	struct rdma_init_func_ramrod_data rdma_init_func;
	struct rdma_close_func_ramrod_data rdma_close_func;
	struct rdma_register_tid_ramrod_data rdma_register_tid;
	struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
	struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
	struct roce_create_qp_req_ramrod_data roce_create_qp_req;
	struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
	struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
	struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
	struct roce_init_func_ramrod_data roce_init_func;
	/* FCoE ramrods */
	struct fcoe_init_ramrod_params fcoe_init;
	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
	struct fcoe_stat_ramrod_params fcoe_stat;

	/* iSCSI ramrods */
	struct iscsi_slow_path_hdr iscsi_empty;
	struct iscsi_init_ramrod_params iscsi_init;
	struct iscsi_spe_func_dstry iscsi_destroy;
	struct iscsi_spe_conn_offload iscsi_conn_offload;
	struct iscsi_conn_update_ramrod_params iscsi_conn_update;
	struct iscsi_spe_conn_termination iscsi_conn_terminate;

	/* SR-IOV VF ramrods */
	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};
127
/* Maximum credit for the event queue — full 32-bit range. */
#define EQ_MAX_CREDIT 0xffffffff

/* Queueing priority of a pending SPQ entry; tracked separately in
 * struct qed_spq (normal_count/high_count).  Presumably high-priority
 * entries are posted to FW first — confirm in qed_spq.c.
 */
enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};
134
/* Completion notification choice for a slow-path request: either a
 * client callback or an address that is updated/polled upon completion.
 */
union qed_spq_req_comp {
	struct qed_spq_comp_cb cb;
	u64 *done_addr;
};

/* Filled in by the SPQ layer when a blocking (EBLOCK) request completes. */
struct qed_spq_comp_done {
	unsigned int done;	/* non-zero once the completion has arrived —
				 * NOTE(review): confirm semantics in qed_spq.c
				 */
	u8 fw_return_code;	/* status code reported by the firmware */
};
144
/* A single slow-path queue request together with its bookkeeping state. */
struct qed_spq_entry {
	struct list_head list;		/* Links entry into one of the SPQ lists */

	u8 flags;

	/* HSI slow path element (what is actually rung to the FW) */
	struct slow_path_element elem;

	/* HSI ramrod data attached to this request */
	union ramrod_data ramrod;

	enum spq_priority priority;

	/* The pending list this entry currently resides on */
	struct list_head *queue;

	/* Completion delivery: mode selects between cb and comp_done */
	enum spq_mode comp_mode;
	struct qed_spq_comp_cb comp_cb;
	struct qed_spq_comp_done comp_done;	/* used for SPQ_MODE_EBLOCK */
};
164
/* Event queue — ring on which the FW posts slow-path completion events. */
struct qed_eq {
	struct qed_chain chain;		/* backing ring memory */
	u8 eq_sb_index;			/* index of the EQ within the status block */
	__le16 *p_fw_cons;		/* FW consumer value inside the status block
					 * — NOTE(review): confirm against qed_spq.c
					 */
};

/* Consolidation queue — only the backing chain is tracked here. */
struct qed_consq {
	struct qed_chain chain;
};
174
/* Per-hwfn slow-path queue state. */
struct qed_spq {
	spinlock_t lock;	/* protects the lists and counters below */

	/* Entry lifecycle lists */
	struct list_head unlimited_pending;	/* overflow beyond ring capacity */
	struct list_head pending;		/* awaiting a ring slot */
	struct list_head completion_pending;	/* posted, awaiting completion */
	struct list_head free_pool;		/* available entries */

	struct qed_chain chain;	/* HW ring of slow_path_element */

	/* DMA-able memory backing the SPQ entries (+ their ramrod data) */
	dma_addr_t p_phys;
	struct qed_spq_entry *p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8 comp_bitmap_idx;

	/* Statistics */
	u32 unlimited_pending_count;
	u32 normal_count;
	u32 high_count;
	u32 comp_sent_count;
	u32 comp_count;

	u32 cid;	/* connection ID used for the SPQ */
};
205
206
207
208
209
210
211
212
213
214
/**
 * qed_spq_post(): Post a slow-path request to the firmware.
 * @p_hwfn: HW function.
 * @p_ent: entry to post (obtained via qed_spq_get_entry()).
 * @fw_return_code: optional out-parameter receiving the FW return code.
 *
 * Return: int.
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);
218
219
220
221
222
223
224
225
/**
 * qed_spq_alloc(): Allocate and initialize the hwfn's slow-path queue.
 * @p_hwfn: HW function.
 *
 * Return: int.
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
227
228
229
230
231
232
/**
 * qed_spq_setup(): Reset the hwfn's slow-path queue to its start state.
 * @p_hwfn: HW function.
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);
234
235
236
237
238
239
/**
 * qed_spq_free(): Release the hwfn's slow-path queue resources.
 * @p_hwfn: HW function.
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);
241
242
243
244
245
246
247
248
249
250
251
252
/**
 * qed_spq_get_entry(): Obtain an entry from the SPQ free pool.
 * @p_hwfn: HW function.
 * @pp_ent: out-parameter receiving the acquired entry.
 *
 * Return: int.
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);
256
257
258
259
260
261
262
263
/**
 * qed_spq_return_entry(): Return an unused entry to the SPQ free pool.
 * @p_hwfn: HW function.
 * @p_ent: entry to return.
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);
266
267
268
269
270
271
272
273
/**
 * qed_eq_alloc(): Allocate and initialize an event queue.
 * @p_hwfn: HW function.
 * @num_elem: number of elements in the EQ ring.
 *
 * Return: pointer to the allocated EQ (presumably NULL on failure —
 *         confirm in implementation).
 */
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem);
276
277
278
279
280
281
282
/**
 * qed_eq_setup(): Reset an event queue to its start state.
 * @p_hwfn: HW function.
 * @p_eq: event queue to set up.
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq);
285
286
287
288
289
290
291
/**
 * qed_eq_free(): Release an event queue's resources.
 * @p_hwfn: HW function.
 * @p_eq: event queue to free.
 */
void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq);
294
295
296
297
298
299
300
/**
 * qed_eq_prod_update(): Publish a new EQ producer value to the device.
 * @p_hwfn: HW function.
 * @prod: producer index to write.
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);
303
304
305
306
307
308
309
310
311
/**
 * qed_eq_completion(): Process currently pending event-queue elements.
 * @p_hwfn: HW function.
 * @cookie: opaque context registered with this callback — presumably the
 *          EQ itself; confirm at the registration site.
 *
 * Return: int.
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);
314
315
316
317
318
319
320
321
322
323
/**
 * qed_spq_completion(): Complete a single pending SPQ event.
 * @p_hwfn: HW function.
 * @echo: echo value from the event, used to locate the completing entry.
 * @fw_return_code: FW status for the completed request.
 * @p_data: event-ring data, forwarded to the completion callback if one
 *          was registered.
 *
 * Return: int.
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);
328
329
330
331
332
333
334
335
/**
 * qed_spq_get_cid(): Retrieve the connection ID of the hwfn's SPQ.
 * @p_hwfn: HW function.
 *
 * Return: the SPQ CID.
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
337
338
339
340
341
342
343
344
345
/**
 * qed_consq_alloc(): Allocate and initialize a consolidation queue.
 * @p_hwfn: HW function.
 *
 * Return: pointer to the allocated ConsQ (presumably NULL on failure —
 *         confirm in implementation).
 */
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
347
348
349
350
351
352
353
354
/**
 * qed_consq_setup(): Reset a consolidation queue to its start state.
 * @p_hwfn: HW function.
 * @p_consq: consolidation queue to set up.
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq);
357
358
359
360
361
362
363
/**
 * qed_consq_free(): Release a consolidation queue's resources.
 * @p_hwfn: HW function.
 * @p_consq: consolidation queue to free.
 */
void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq);
366
367
368
369
370
371
372
/* Completion-indication flags — NOTE(review): not referenced elsewhere in
 * this header; confirm usage against qed_sp_commands.c.
 */
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02

/* Parameters with which a slow-path request is initialized
 * (see qed_sp_init_request()).
 */
struct qed_sp_init_data {
	u32 cid;		/* connection ID the ramrod is sent on */
	u16 opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};
384
/**
 * qed_sp_init_request(): Acquire and initialize an SPQ entry for a
 *                        slow-path request.
 * @p_hwfn: HW function.
 * @pp_ent: out-parameter receiving the prepared entry.
 * @cmd: ramrod command ID.
 * @protocol: protocol the ramrod belongs to.
 * @p_data: initialization data (CID, opaque FID, completion mode).
 *
 * Return: int.
 */
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/**
 * qed_sp_pf_start(): PF Function Start ramrod.
 * @p_hwfn: HW function.
 * @p_tunn: tunnel configuration applied at PF start.
 * @mode: multi-function mode of operation.
 * @allow_npar_tx_switch: whether to allow NPAR TX switching.
 *
 * This ramrod initializes a physical function.
 *
 * Return: int.
 */
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
413
414
415
416
417
418
419
420
421
422
423
424
/**
 * qed_sp_pf_update(): PF Function Update ramrod — updates
 *                     function-related parameters at runtime.
 * @p_hwfn: HW function.
 *
 * Return: int.
 */
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
/**
 * qed_sp_pf_stop(): PF Function Stop ramrod — closes the physical
 *                   function.
 * @p_hwfn: HW function.
 *
 * Return: int.
 */
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
442
/**
 * qed_sp_pf_update_tunn_cfg(): Update the PF's tunnel configuration.
 * @p_hwfn: HW function.
 * @p_tunn: new tunnel parameters.
 * @comp_mode: how the request's completion is delivered.
 * @p_comp_data: completion callback data (used when @comp_mode is
 *               QED_SPQ_MODE_CB).
 *
 * Return: int.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);
447
448
449
450
451
452
453
454
/**
 * qed_sp_heartbeat_ramrod(): Send an empty ramrod — presumably used as a
 *                            FW liveness check; confirm in implementation.
 * @p_hwfn: HW function.
 *
 * Return: int.
 */
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
456
457#endif
458