#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"
60
61
62int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
63 struct qed_bmap *bmap, u32 max_count, char *name)
64{
65 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
66
67 bmap->max_count = max_count;
68
69 bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
70 GFP_KERNEL);
71 if (!bmap->bitmap)
72 return -ENOMEM;
73
74 snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
75
76 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
77 return 0;
78}
79
80int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
81 struct qed_bmap *bmap, u32 *id_num)
82{
83 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
84 if (*id_num >= bmap->max_count)
85 return -EINVAL;
86
87 __set_bit(*id_num, bmap->bitmap);
88
89 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
90 bmap->name, *id_num);
91
92 return 0;
93}
94
95void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
96 struct qed_bmap *bmap, u32 id_num)
97{
98 if (id_num >= bmap->max_count)
99 return;
100
101 __set_bit(id_num, bmap->bitmap);
102}
103
104void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
105 struct qed_bmap *bmap, u32 id_num)
106{
107 bool b_acquired;
108
109 if (id_num >= bmap->max_count)
110 return;
111
112 b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
113 if (!b_acquired) {
114 DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
115 bmap->name, id_num);
116 return;
117 }
118
119 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
120 bmap->name, id_num);
121}
122
123int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
124 struct qed_bmap *bmap, u32 id_num)
125{
126 if (id_num >= bmap->max_count)
127 return -1;
128
129 return test_bit(id_num, bmap->bitmap);
130}
131
132static bool qed_bmap_is_empty(struct qed_bmap *bmap)
133{
134 return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
135}
136
137static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
138{
139
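	/* RDMA status blocks start right after the L2 queues' status blocks */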
140 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
141}
142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
144 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params)
146{
147 struct qed_rdma_info *p_rdma_info;
148 u32 num_cons, num_tasks;
149 int rc = -ENOMEM;
150
151 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
152
153
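	/* Allocate a structure holding the per-PF RDMA bookkeeping */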
154 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
155 if (!p_rdma_info)
156 return rc;
157
158 p_hwfn->p_rdma_info = p_rdma_info;
159 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
160 p_rdma_info->proto = PROTOCOLID_IWARP;
161 else
162 p_rdma_info->proto = PROTOCOLID_ROCE;
163
164 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
165 NULL);
166
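	/* RoCE uses two CIDs per QP (requester and responder), so only half of
	 * the connections translate into QPs; iWARP needs one CID per QP.
	 */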
167 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
168 p_rdma_info->num_qps = num_cons;
169 else
170 p_rdma_info->num_qps = num_cons / 2;
171
172 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
173
174
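	/* Each memory region (MR) consumes a single task id (itid) */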
175 p_rdma_info->num_mrs = num_tasks;
176
177
178
179
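	/* Queue zones used for CNQ producer updates follow the L2 queues */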
180 p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
181 p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
182
183
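	/* Allocate the device-attributes structure reported by query_device */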
184 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
185 if (!p_rdma_info->dev)
186 goto free_rdma_info;
187
188
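	/* Allocate the port-attributes structure reported by query_port */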
189 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
190 if (!p_rdma_info->port)
191 goto free_rdma_dev;
192
193
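	/* Allocate bitmap for protection domain (PD) ids */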
194 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
195 "PD");
196 if (rc) {
197 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
198 "Failed to allocate pd_map, rc = %d\n",
199 rc);
200 goto free_rdma_port;
201 }
202
203
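	/* Allocate bitmap for doorbell page ids (DPIs) */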
204 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
205 p_hwfn->dpi_count, "DPI");
206 if (rc) {
207 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
208 "Failed to allocate DPI bitmap, rc = %d\n", rc);
209 goto free_pd_map;
210 }
211
212
213
214
215
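	/* Allocate bitmap for CQ ids; bounded by the number of connections */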
216 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
217 if (rc) {
218 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
219 "Failed to allocate cq bitmap, rc = %d\n", rc);
220 goto free_dpi_map;
221 }
222
223
224
225
226
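	/* Allocate bitmap of toggle bits, flipped on every create/resize CQ */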
227 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
228 num_cons, "Toggle");
229 if (rc) {
230 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
231 "Failed to allocate toggle bits, rc = %d\n", rc);
232 goto free_cq_map;
233 }
234
235
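	/* Allocate bitmap for MR task ids (itids) */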
236 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
237 p_rdma_info->num_mrs, "MR");
238 if (rc) {
239 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
240 "Failed to allocate itids bitmaps, rc = %d\n", rc);
241 goto free_toggle_map;
242 }
243
244
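	/* Allocate bitmap for connection ids (CIDs) */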
245 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
246 "CID");
247 if (rc) {
248 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
249 "Failed to allocate cid bitmap, rc = %d\n", rc);
250 goto free_tid_map;
251 }
252
253
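	/* Allocate bitmap for the CIDs actually handed to the FW */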
254 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
255 "REAL_CID");
256 if (rc) {
257 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
258 "Failed to allocate real cid bitmap, rc = %d\n", rc);
259 goto free_cid_map;
260 }
261
262
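	/* Allocate bitmap for SRQ ids */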
263 p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
264 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
265 p_rdma_info->num_srqs, "SRQ");
266 if (rc) {
267 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
268 "Failed to allocate srq bitmap, rc = %d\n", rc);
269 goto free_real_cid_map;
270 }
271
272 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
273 rc = qed_iwarp_alloc(p_hwfn);
274
275 if (rc)
276 goto free_srq_map;
277
278 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
279 return 0;
280
281free_srq_map:
282 kfree(p_rdma_info->srq_map.bitmap);
283free_real_cid_map:
284 kfree(p_rdma_info->real_cid_map.bitmap);
285free_cid_map:
286 kfree(p_rdma_info->cid_map.bitmap);
287free_tid_map:
288 kfree(p_rdma_info->tid_map.bitmap);
289free_toggle_map:
290 kfree(p_rdma_info->toggle_bits.bitmap);
291free_cq_map:
292 kfree(p_rdma_info->cq_map.bitmap);
293free_dpi_map:
294 kfree(p_rdma_info->dpi_map.bitmap);
295free_pd_map:
296 kfree(p_rdma_info->pd_map.bitmap);
297free_rdma_port:
298 kfree(p_rdma_info->port);
299free_rdma_dev:
300 kfree(p_rdma_info->dev);
301free_rdma_info:
302 kfree(p_rdma_info);
303
304 return rc;
305}
306
307void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
308 struct qed_bmap *bmap, bool check)
309{
310 int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
311 int last_line = bmap->max_count / (64 * 8);
312 int last_item = last_line * 8 +
313 DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
314 u64 *pmap = (u64 *)bmap->bitmap;
315 int line, item, offset;
316 u8 str_last_line[200] = { 0 };
317
318 if (!weight || !check)
319 goto end;
320
321 DP_NOTICE(p_hwfn,
322 "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
323 bmap->name, bmap->max_count, weight);
324
325
326 for (item = 0, line = 0; line < last_line; line++, item += 8)
327 if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
328 DP_NOTICE(p_hwfn,
329 "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
330 line,
331 pmap[item],
332 pmap[item + 1],
333 pmap[item + 2],
334 pmap[item + 3],
335 pmap[item + 4],
336 pmap[item + 5],
337 pmap[item + 6], pmap[item + 7]);
338
339
340 if ((bmap->max_count % (64 * 8)) &&
341 (bitmap_weight((unsigned long *)&pmap[item],
342 bmap->max_count - item * 64))) {
343 offset = sprintf(str_last_line, "line 0x%04x: ", line);
344 for (; item < last_item; item++)
345 offset += sprintf(str_last_line + offset,
346 "0x%016llx ", pmap[item]);
347 DP_NOTICE(p_hwfn, "%s\n", str_last_line);
348 }
349
350end:
351 kfree(bmap->bitmap);
352 bmap->bitmap = NULL;
353}
354
355static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
356{
357 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
358
359 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
360 qed_iwarp_resc_free(p_hwfn);
361
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->cid_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->pd_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->dpi_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->cq_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->toggle_bits, false);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->tid_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->srq_map, true);
	qed_rdma_bmap_free(p_hwfn, &p_rdma_info->real_cid_map, true);
370
371 kfree(p_rdma_info->port);
372 kfree(p_rdma_info->dev);
373
374 kfree(p_rdma_info);
375}
376
377static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
378{
379 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
380
381 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
382
383 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
384 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
385 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
386}
387
388static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
389{
390 qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
391}
392
393static void qed_rdma_free(struct qed_hwfn *p_hwfn)
394{
395 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
396
397 qed_rdma_free_reserved_lkey(p_hwfn);
398 qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
399 qed_rdma_resc_free(p_hwfn);
400}
401
402static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
403{
404 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
405 guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
406 guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
407 guid[3] = 0xff;
408 guid[4] = 0xfe;
409 guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
410 guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
411 guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
412}
413
414static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
415 struct qed_rdma_start_in_params *params)
416{
417 struct qed_rdma_events *events;
418
419 events = &p_hwfn->p_rdma_info->events;
420
421 events->unaffiliated_event = params->events->unaffiliated_event;
422 events->affiliated_event = params->events->affiliated_event;
423 events->context = params->events->context;
424}
425
426static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
427 struct qed_rdma_start_in_params *params)
428{
429 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
430 struct qed_dev *cdev = p_hwfn->cdev;
431 u32 pci_status_control;
432 u32 num_qps;
433
434
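	/* Fill base device attributes from the PCI identity and FW version */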
435 dev->vendor_id = cdev->vendor_id;
436 dev->vendor_part_id = cdev->device_id;
437 dev->hw_ver = 0;
438 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
439 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
440
441 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
442 dev->node_guid = dev->sys_image_guid;
443
444 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
445 RDMA_MAX_SGE_PER_RQ_WQE);
446
447 if (cdev->rdma_max_sge)
448 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
449
450 dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
451 if (p_hwfn->cdev->rdma_max_srq_sge) {
452 dev->max_srq_sge = min_t(u32,
453 p_hwfn->cdev->rdma_max_srq_sge,
454 dev->max_srq_sge);
455 }
456 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
457
458 dev->max_inline = (cdev->rdma_max_inline) ?
459 min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
460 dev->max_inline;
461
462 dev->max_wqe = QED_RDMA_MAX_WQE;
463 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
464
465
466
467
468
469
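	/* Do not advertise more QPs than the FW can actually support */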
470 num_qps = ROCE_MAX_QPS;
471 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
472 dev->max_qp = num_qps;
473
474
475
476
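	/* CQs share icids with QPs, two per QP, hence the factor of two */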
477 dev->max_cq = num_qps * 2;
478
479
480 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
481 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
482
483
484
485
486
487
488 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
489 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
490 else
491 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
492
493 dev->max_mw = 0;
494 dev->max_fmr = QED_RDMA_MAX_FMR;
495 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
496 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
497 dev->max_pkey = QED_RDMA_MAX_P_KEY;
498
499 dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
500 dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
501 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
502 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
503 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
504 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
505 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
506 p_hwfn->p_rdma_info->num_qps;
507 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
508 dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
509 dev->max_pd = RDMA_MAX_PDS;
510 dev->max_ah = p_hwfn->p_rdma_info->num_qps;
511 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
512
513
514 dev->dev_caps = 0;
515 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
516 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
517 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
518 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
519 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
520 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
521 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
522 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
523
524
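	/* Advertise atomic-op support based on the PCIe Device Control 2 register */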
525 pci_read_config_dword(cdev->pdev,
526 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
527 &pci_status_control);
528
529 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
530 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
531
532 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
533 qed_iwarp_init_devinfo(p_hwfn);
534}
535
536static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
537{
538 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
539 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
540
541 port->port_state = p_hwfn->mcp_info->link_output.link_up ?
542 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
543
544 port->max_msg_size = min_t(u64,
545 (dev->max_mr_mw_fmr_size *
546 p_hwfn->cdev->rdma_max_sge),
547 BIT(31));
548
549 port->pkey_bad_counter = 0;
550}
551
552static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
553{
554 int rc = 0;
555
556 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
557 p_hwfn->b_rdma_enabled_in_prs = false;
558
559 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
560 qed_iwarp_init_hw(p_hwfn, p_ptt);
561 else
562 rc = qed_roce_init_hw(p_hwfn, p_ptt);
563
564 return rc;
565}
566
567static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
568 struct qed_rdma_start_in_params *params,
569 struct qed_ptt *p_ptt)
570{
571 struct rdma_init_func_ramrod_data *p_ramrod;
572 struct qed_rdma_cnq_params *p_cnq_pbl_list;
573 struct rdma_init_func_hdr *p_params_header;
574 struct rdma_cnq_params *p_cnq_params;
575 struct qed_sp_init_data init_data;
576 struct qed_spq_entry *p_ent;
577 u32 cnq_id, sb_id;
578 u16 igu_sb_id;
579 int rc;
580
581 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
582
583
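	/* Save the number of CNQs for the function-close ramrod */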
584 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
585
586
587 memset(&init_data, 0, sizeof(init_data));
588 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
589 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
590
591 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
592 p_hwfn->p_rdma_info->proto, &init_data);
593 if (rc)
594 return rc;
595
596 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
597 qed_iwarp_init_fw_ramrod(p_hwfn,
598 &p_ent->ramrod.iwarp_init_func);
599 p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
600 } else {
601 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
602 }
603
604 p_params_header = &p_ramrod->params_header;
605 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
606 QED_RDMA_CNQ_RAM);
607 p_params_header->num_cnqs = params->desired_cnq;
608
609 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
610 p_params_header->cq_ring_mode = 1;
611 else
612 p_params_header->cq_ring_mode = 0;
613
614 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
615 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
616 igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
617 p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
618 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
620
621 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
622 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
623
624 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
625 p_cnq_pbl_list->pbl_ptr);
626
627
628 p_cnq_params->queue_zone_num =
629 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
630 cnq_id);
631 }
632
633 return qed_spq_post(p_hwfn, p_ent, NULL);
634}
635
636static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
637{
638 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
639 int rc;
640
641 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
642
643 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
644 rc = qed_rdma_bmap_alloc_id(p_hwfn,
645 &p_hwfn->p_rdma_info->tid_map, itid);
646 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
647 if (rc)
648 goto out;
649
650 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
651out:
652 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
653 return rc;
654}
655
656static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
657{
658 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
659
660
661
662
663
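	/* TID 0 acts as the reserved lkey: allocate it here, but no
	 * register-MR ramrod is ever posted for it.
	 */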
664 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
665 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
666 DP_NOTICE(p_hwfn,
667 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
668 return -EINVAL;
669 }
670
671 return 0;
672}
673
674static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
675 struct qed_ptt *p_ptt,
676 struct qed_rdma_start_in_params *params)
677{
678 int rc;
679
680 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
681
682 spin_lock_init(&p_hwfn->p_rdma_info->lock);
683
684 qed_rdma_init_devinfo(p_hwfn, params);
685 qed_rdma_init_port(p_hwfn);
686 qed_rdma_init_events(p_hwfn, params);
687
688 rc = qed_rdma_reserve_lkey(p_hwfn);
689 if (rc)
690 return rc;
691
692 rc = qed_rdma_init_hw(p_hwfn, p_ptt);
693 if (rc)
694 return rc;
695
696 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
697 rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
698 if (rc)
699 return rc;
700 } else {
701 rc = qed_roce_setup(p_hwfn);
702 if (rc)
703 return rc;
704 }
705
706 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
707}
708
709static int qed_rdma_stop(void *rdma_cxt)
710{
711 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
712 struct rdma_close_func_ramrod_data *p_ramrod;
713 struct qed_sp_init_data init_data;
714 struct qed_spq_entry *p_ent;
715 struct qed_ptt *p_ptt;
716 u32 ll2_ethertype_en;
717 int rc = -EBUSY;
718
719 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
720
721 p_ptt = qed_ptt_acquire(p_hwfn);
722 if (!p_ptt) {
723 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
724 return rc;
725 }
726
727
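	/* Disable the RDMA searcher in the parser */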
728 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
729 p_hwfn->b_rdma_enabled_in_prs = false;
730
731 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
732
733 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
734
735 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
736 (ll2_ethertype_en & 0xFFFE));
737
738 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
739 rc = qed_iwarp_stop(p_hwfn, p_ptt);
740 if (rc) {
741 qed_ptt_release(p_hwfn, p_ptt);
742 return rc;
743 }
744 } else {
745 qed_roce_stop(p_hwfn);
746 }
747
748 qed_ptt_release(p_hwfn, p_ptt);
749
750
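	/* Prepare and send the function-close ramrod */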
751 memset(&init_data, 0, sizeof(init_data));
752 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
753 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
754
755
756 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
757 p_hwfn->p_rdma_info->proto, &init_data);
758 if (rc)
759 goto out;
760
761 p_ramrod = &p_ent->ramrod.rdma_close_func;
762
763 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
764 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
765
766 rc = qed_spq_post(p_hwfn, p_ent, NULL);
767
768out:
769 qed_rdma_free(p_hwfn);
770
771 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
772 return rc;
773}
774
775static int qed_rdma_add_user(void *rdma_cxt,
776 struct qed_rdma_add_user_out_params *out_params)
777{
778 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
779 u32 dpi_start_offset;
780 u32 returned_id = 0;
781 int rc;
782
783 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
784
785
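	/* Allocate a doorbell page id (DPI) for the new user */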
786 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
787 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
788 &returned_id);
789 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
790
791 out_params->dpi = (u16)returned_id;
792
793
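	/* Compute the user's doorbell window inside the doorbell BAR */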
794 dpi_start_offset = p_hwfn->dpi_start_offset;
795
796 out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
797 dpi_start_offset +
798 ((out_params->dpi) * p_hwfn->dpi_size));
799
800 out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
801 dpi_start_offset +
802 ((out_params->dpi) * p_hwfn->dpi_size);
803
804 out_params->dpi_size = p_hwfn->dpi_size;
805 out_params->wid_count = p_hwfn->wid_count;
806
807 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
808 return rc;
809}
810
811static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
812{
813 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
814 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
815
816 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
817
818
819 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
820 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
821
822 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
823
824 p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
825
826 return p_port;
827}
828
829static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
830{
831 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
832
833 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
834
835
836 return p_hwfn->p_rdma_info->dev;
837}
838
839static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
840{
841 struct qed_hwfn *p_hwfn;
842 u16 qz_num;
843 u32 addr;
844
845 p_hwfn = (struct qed_hwfn *)rdma_cxt;
846
847 if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
848 DP_NOTICE(p_hwfn,
849 "queue zone offset %d is too large (max is %d)\n",
850 qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
851 return;
852 }
853
854 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
855 addr = GTT_BAR0_MAP_REG_USDM_RAM +
856 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
857
858 REG_WR16(p_hwfn, addr, prod);
859
860
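	/* Keep producer updates ordered */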
861 wmb();
862}
863
864static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
865 struct qed_dev_rdma_info *info)
866{
867 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
868
869 memset(info, 0, sizeof(*info));
870
871 info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
872 QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
873
874 info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
875
876 qed_fill_dev_info(cdev, &info->common);
877
878 return 0;
879}
880
881static int qed_rdma_get_sb_start(struct qed_dev *cdev)
882{
883 int feat_num;
884
885 if (cdev->num_hwfns > 1)
886 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
887 else
888 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
889 cdev->num_hwfns;
890
891 return feat_num;
892}
893
894static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
895{
896 int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
897 int n_msix = cdev->int_params.rdma_msix_cnt;
898
899 return min_t(int, n_cnq, n_msix);
900}
901
902static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
903{
904 int limit = 0;
905
906
907 cdev->int_params.fp_initialized = cnt ? true : false;
908
909 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
910 DP_ERR(cdev,
911 "qed roce supports only MSI-X interrupts (detected %d).\n",
912 cdev->int_params.out.int_mode);
913 return -EINVAL;
914 } else if (cdev->int_params.fp_msix_cnt) {
915 limit = cdev->int_params.rdma_msix_cnt;
916 }
917
918 if (!limit)
919 return -ENOMEM;
920
921 return min_t(int, cnt, limit);
922}
923
924static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
925{
926 memset(info, 0, sizeof(*info));
927
928 if (!cdev->int_params.fp_initialized) {
929 DP_INFO(cdev,
930 "Protocol driver requested interrupt information, but its support is not yet configured\n");
931 return -EINVAL;
932 }
933
934 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
935 int msix_base = cdev->int_params.rdma_msix_base;
936
937 info->msix_cnt = cdev->int_params.rdma_msix_cnt;
938 info->msix = &cdev->int_params.msix_table[msix_base];
939
940 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
941 info->msix_cnt, msix_base);
942 }
943
944 return 0;
945}
946
947static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
948{
949 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
950 u32 returned_id;
951 int rc;
952
953 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
954
955
956 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
957 rc = qed_rdma_bmap_alloc_id(p_hwfn,
958 &p_hwfn->p_rdma_info->pd_map, &returned_id);
959 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
960
961 *pd = (u16)returned_id;
962
963 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
964 return rc;
965}
966
967static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
968{
969 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
970
971 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
972
973
974 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
975 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
976 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
977}
978
979static enum qed_rdma_toggle_bit
980qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
981{
982 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
983 enum qed_rdma_toggle_bit toggle_bit;
984 u32 bmap_id;
985
986 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
987
988
989
990
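	/* Flip the toggle bit tracked for this icid and return its new value */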
991 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
992
993 spin_lock_bh(&p_info->lock);
994 toggle_bit = !test_and_change_bit(bmap_id,
995 p_info->toggle_bits.bitmap);
996 spin_unlock_bh(&p_info->lock);
997
998 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
999 toggle_bit);
1000
1001 return toggle_bit;
1002}
1003
1004static int qed_rdma_create_cq(void *rdma_cxt,
1005 struct qed_rdma_create_cq_in_params *params,
1006 u16 *icid)
1007{
1008 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1009 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
1010 struct rdma_create_cq_ramrod_data *p_ramrod;
1011 enum qed_rdma_toggle_bit toggle_bit;
1012 struct qed_sp_init_data init_data;
1013 struct qed_spq_entry *p_ent;
1014 u32 returned_id, start_cid;
1015 int rc;
1016
1017 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
1018 params->cq_handle_hi, params->cq_handle_lo);
1019
1020
1021 spin_lock_bh(&p_info->lock);
1022 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
1023 spin_unlock_bh(&p_info->lock);
1024
1025 if (rc) {
1026 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
1027 return rc;
1028 }
1029
1030 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1031 p_info->proto);
1032 *icid = returned_id + start_cid;
1033
1034
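	/* Make sure an ILT line exists for this CQ's context before the ramrod */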
1035 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
1036 if (rc)
1037 goto err;
1038
1039
1040 memset(&init_data, 0, sizeof(init_data));
1041 init_data.cid = *icid;
1042 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1043 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1044
1045
1046 rc = qed_sp_init_request(p_hwfn, &p_ent,
1047 RDMA_RAMROD_CREATE_CQ,
1048 p_info->proto, &init_data);
1049 if (rc)
1050 goto err;
1051
1052 p_ramrod = &p_ent->ramrod.rdma_create_cq;
1053
1054 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
1055 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
1056 p_ramrod->dpi = cpu_to_le16(params->dpi);
1057 p_ramrod->is_two_level_pbl = params->pbl_two_level;
1058 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
1059 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
1060 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
1061 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
1062 params->cnq_id;
1063 p_ramrod->int_timeout = params->int_timeout;
1064
1065
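	/* The toggle bit must flip on every create/resize CQ for a given icid */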
1066 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1067
1068 p_ramrod->toggle_bit = toggle_bit;
1069
1070 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1071 if (rc) {
1072
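		/* Restore the toggle bit on failure */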
1073 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1074 goto err;
1075 }
1076
1077 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
1078 return rc;
1079
1080err:
1081
1082 spin_lock_bh(&p_info->lock);
1083 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
1084 spin_unlock_bh(&p_info->lock);
1085 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
1086
1087 return rc;
1088}
1089
1090static int
1091qed_rdma_destroy_cq(void *rdma_cxt,
1092 struct qed_rdma_destroy_cq_in_params *in_params,
1093 struct qed_rdma_destroy_cq_out_params *out_params)
1094{
1095 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1096 struct rdma_destroy_cq_output_params *p_ramrod_res;
1097 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1098 struct qed_sp_init_data init_data;
1099 struct qed_spq_entry *p_ent;
1100 dma_addr_t ramrod_res_phys;
1101 enum protocol_type proto;
1102 int rc = -ENOMEM;
1103
1104 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1105
1106 p_ramrod_res =
1107 (struct rdma_destroy_cq_output_params *)
1108 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1109 sizeof(struct rdma_destroy_cq_output_params),
1110 &ramrod_res_phys, GFP_KERNEL);
1111 if (!p_ramrod_res) {
1112 DP_NOTICE(p_hwfn,
1113 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1114 return rc;
1115 }
1116
1117
1118 memset(&init_data, 0, sizeof(init_data));
1119 init_data.cid = in_params->icid;
1120 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1121 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1122 proto = p_hwfn->p_rdma_info->proto;
1123
1124 rc = qed_sp_init_request(p_hwfn, &p_ent,
1125 RDMA_RAMROD_DESTROY_CQ,
1126 proto, &init_data);
1127 if (rc)
1128 goto err;
1129
1130 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1131 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1132
1133 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1134 if (rc)
1135 goto err;
1136
1137 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1138
1139 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1140 sizeof(struct rdma_destroy_cq_output_params),
1141 p_ramrod_res, ramrod_res_phys);
1142
1143
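	/* Free the CQ icid back to the bitmap */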
1144 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1145
1146 qed_bmap_release_id(p_hwfn,
1147 &p_hwfn->p_rdma_info->cq_map,
1148 (in_params->icid -
1149 qed_cxt_get_proto_cid_start(p_hwfn, proto)));
1150
1151 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1152
1153 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1154 return rc;
1155
err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);
1159
1160 return rc;
1161}
1162
1163void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1164{
1165 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1166 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1167 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1168}
1169
1170static int qed_rdma_query_qp(void *rdma_cxt,
1171 struct qed_rdma_qp *qp,
1172 struct qed_rdma_query_qp_out_params *out_params)
1173{
1174 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1175 int rc = 0;
1176
1177 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1178
1179
1180
1181
1182 out_params->mtu = qp->mtu;
1183 out_params->dest_qp = qp->dest_qp;
1184 out_params->incoming_atomic_en = qp->incoming_atomic_en;
1185 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1186 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
1187 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
1188 out_params->dgid = qp->dgid;
1189 out_params->flow_label = qp->flow_label;
1190 out_params->hop_limit_ttl = qp->hop_limit_ttl;
1191 out_params->traffic_class_tos = qp->traffic_class_tos;
1192 out_params->timeout = qp->ack_timeout;
1193 out_params->rnr_retry = qp->rnr_retry_cnt;
1194 out_params->retry_cnt = qp->retry_cnt;
1195 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
1196 out_params->pkey_index = 0;
1197 out_params->max_rd_atomic = qp->max_rd_atomic_req;
1198 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
1199 out_params->sqd_async = qp->sqd_async;
1200
1201 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
1202 qed_iwarp_query_qp(qp, out_params);
1203 else
1204 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
1205
1206 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
1207 return rc;
1208}
1209
1210static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
1211{
1212 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1213 int rc = 0;
1214
1215 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1216
1217 if (QED_IS_IWARP_PERSONALITY(p_hwfn))
1218 rc = qed_iwarp_destroy_qp(p_hwfn, qp);
1219 else
1220 rc = qed_roce_destroy_qp(p_hwfn, qp);
1221
1222
1223 kfree(qp);
1224
1225 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
1226 return rc;
1227}
1228
1229static struct qed_rdma_qp *
1230qed_rdma_create_qp(void *rdma_cxt,
1231 struct qed_rdma_create_qp_in_params *in_params,
1232 struct qed_rdma_create_qp_out_params *out_params)
1233{
1234 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1235 struct qed_rdma_qp *qp;
1236 u8 max_stats_queues;
1237 int rc;
1238
1239 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
1240 DP_ERR(p_hwfn->cdev,
1241 "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
1242 rdma_cxt, in_params, out_params);
1243 return NULL;
1244 }
1245
1246 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1247 "qed rdma create qp called with qp_handle = %08x%08x\n",
1248 in_params->qp_handle_hi, in_params->qp_handle_lo);
1249
1250
1251 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
1252 if (in_params->stats_queue >= max_stats_queues) {
1253 DP_ERR(p_hwfn->cdev,
1254 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
1255 in_params->stats_queue, max_stats_queues);
1256 return NULL;
1257 }
1258
1259 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
1260 if (in_params->sq_num_pages * sizeof(struct regpair) >
1261 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
1262 DP_NOTICE(p_hwfn->cdev,
1263 "Sq num pages: %d exceeds maximum\n",
1264 in_params->sq_num_pages);
1265 return NULL;
1266 }
1267 if (in_params->rq_num_pages * sizeof(struct regpair) >
1268 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
1269 DP_NOTICE(p_hwfn->cdev,
1270 "Rq num pages: %d exceeds maximum\n",
1271 in_params->rq_num_pages);
1272 return NULL;
1273 }
1274 }
1275
1276 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1277 if (!qp)
1278 return NULL;
1279
1280 qp->cur_state = QED_ROCE_QP_STATE_RESET;
1281 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
1282 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
1283 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
1284 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
1285 qp->use_srq = in_params->use_srq;
1286 qp->signal_all = in_params->signal_all;
1287 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
1288 qp->pd = in_params->pd;
1289 qp->dpi = in_params->dpi;
1290 qp->sq_cq_id = in_params->sq_cq_id;
1291 qp->sq_num_pages = in_params->sq_num_pages;
1292 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
1293 qp->rq_cq_id = in_params->rq_cq_id;
1294 qp->rq_num_pages = in_params->rq_num_pages;
1295 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
1296 qp->srq_id = in_params->srq_id;
1297 qp->req_offloaded = false;
1298 qp->resp_offloaded = false;
	qp->e2e_flow_control_en = !qp->use_srq;
1300 qp->stats_queue = in_params->stats_queue;
1301
1302 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
1303 rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
1304 qp->qpid = qp->icid;
1305 } else {
1306 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
1307 qp->qpid = ((0xFF << 16) | qp->icid);
1308 }
1309
1310 if (rc) {
1311 kfree(qp);
1312 return NULL;
1313 }
1314
1315 out_params->icid = qp->icid;
1316 out_params->qp_id = qp->qpid;
1317
1318 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
1319 return qp;
1320}
1321
1322static int qed_rdma_modify_qp(void *rdma_cxt,
1323 struct qed_rdma_qp *qp,
1324 struct qed_rdma_modify_qp_in_params *params)
1325{
1326 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1327 enum qed_roce_qp_state prev_state;
1328 int rc = 0;
1329
1330 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
1331 qp->icid, params->new_state);
1332
1333 if (rc) {
1334 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1335 return rc;
1336 }
1337
1338 if (GET_FIELD(params->modify_flags,
1339 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
1340 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
1341 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
1342 qp->incoming_atomic_en = params->incoming_atomic_en;
1343 }
1344
1345
1346 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
1347 qp->roce_mode = params->roce_mode;
1348 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
1349 qp->pkey = params->pkey;
1350 if (GET_FIELD(params->modify_flags,
1351 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
1352 qp->e2e_flow_control_en = params->e2e_flow_control_en;
1353 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
1354 qp->dest_qp = params->dest_qp;
1355 if (GET_FIELD(params->modify_flags,
1356 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
1357
1358
1359
1360
1361 qp->traffic_class_tos = params->traffic_class_tos;
1362 qp->flow_label = params->flow_label;
1363 qp->hop_limit_ttl = params->hop_limit_ttl;
1364
1365 qp->sgid = params->sgid;
1366 qp->dgid = params->dgid;
1367 qp->udp_src_port = 0;
1368 qp->vlan_id = params->vlan_id;
1369 qp->mtu = params->mtu;
1370 qp->lb_indication = params->lb_indication;
1371 memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
1373 if (params->use_local_mac) {
1374 memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
1376 } else {
1377 memcpy((u8 *)&qp->local_mac_addr[0],
1378 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1379 }
1380 }
1381 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
1382 qp->rq_psn = params->rq_psn;
1383 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
1384 qp->sq_psn = params->sq_psn;
1385 if (GET_FIELD(params->modify_flags,
1386 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
1387 qp->max_rd_atomic_req = params->max_rd_atomic_req;
1388 if (GET_FIELD(params->modify_flags,
1389 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
1390 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
1391 if (GET_FIELD(params->modify_flags,
1392 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
1393 qp->ack_timeout = params->ack_timeout;
1394 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
1395 qp->retry_cnt = params->retry_cnt;
1396 if (GET_FIELD(params->modify_flags,
1397 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
1398 qp->rnr_retry_cnt = params->rnr_retry_cnt;
1399 if (GET_FIELD(params->modify_flags,
1400 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
1401 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
1402
1403 qp->sqd_async = params->sqd_async;
1404
1405 prev_state = qp->cur_state;
1406 if (GET_FIELD(params->modify_flags,
1407 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
1408 qp->cur_state = params->new_state;
1409 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
1410 qp->cur_state);
1411 }
1412
1413 if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
1414 enum qed_iwarp_qp_state new_state =
1415 qed_roce2iwarp_state(qp->cur_state);
1416
1417 rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
1418 } else {
1419 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
1420 }
1421
1422 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
1423 return rc;
1424}
1425
1426static int
1427qed_rdma_register_tid(void *rdma_cxt,
1428 struct qed_rdma_register_tid_in_params *params)
1429{
1430 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1431 struct rdma_register_tid_ramrod_data *p_ramrod;
1432 struct qed_sp_init_data init_data;
1433 struct qed_spq_entry *p_ent;
1434 enum rdma_tid_type tid_type;
1435 u8 fw_return_code;
1436 int rc;
1437
1438 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
1439
1440
1441 memset(&init_data, 0, sizeof(init_data));
1442 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1443 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1444
1445 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
1446 p_hwfn->p_rdma_info->proto, &init_data);
1447 if (rc) {
1448 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1449 return rc;
1450 }
1451
1452 if (p_hwfn->p_rdma_info->last_tid < params->itid)
1453 p_hwfn->p_rdma_info->last_tid = params->itid;
1454
1455 p_ramrod = &p_ent->ramrod.rdma_register_tid;
1456
1457 p_ramrod->flags = 0;
1458 SET_FIELD(p_ramrod->flags,
1459 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
1460 params->pbl_two_level);
1461
1462 SET_FIELD(p_ramrod->flags,
1463 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
1464
1465 SET_FIELD(p_ramrod->flags,
1466 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
1467
1468
	if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr)
1470 SET_FIELD(p_ramrod->flags,
1471 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
1472 params->page_size_log - 12);
1473
1474 SET_FIELD(p_ramrod->flags,
1475 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
1476 params->remote_read);
1477
1478 SET_FIELD(p_ramrod->flags,
1479 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
1480 params->remote_write);
1481
1482 SET_FIELD(p_ramrod->flags,
1483 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
1484 params->remote_atomic);
1485
1486 SET_FIELD(p_ramrod->flags,
1487 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
1488 params->local_write);
1489
1490 SET_FIELD(p_ramrod->flags,
1491 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
1492
1493 SET_FIELD(p_ramrod->flags,
1494 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
1495 params->mw_bind);
1496
1497 SET_FIELD(p_ramrod->flags1,
1498 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
1499 params->pbl_page_size_log - 12);
1500
1501 SET_FIELD(p_ramrod->flags2,
1502 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
1503
1504 switch (params->tid_type) {
1505 case QED_RDMA_TID_REGISTERED_MR:
1506 tid_type = RDMA_TID_REGISTERED_MR;
1507 break;
1508 case QED_RDMA_TID_FMR:
1509 tid_type = RDMA_TID_FMR;
1510 break;
1511 case QED_RDMA_TID_MW:
1512 tid_type = RDMA_TID_MW;
1513 break;
1514 default:
1515 rc = -EINVAL;
1516 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1517 return rc;
1518 }
1519 SET_FIELD(p_ramrod->flags1,
1520 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
1521
1522 p_ramrod->itid = cpu_to_le32(params->itid);
1523 p_ramrod->key = params->key;
1524 p_ramrod->pd = cpu_to_le16(params->pd);
1525 p_ramrod->length_hi = (u8)(params->length >> 32);
1526 p_ramrod->length_lo = DMA_LO_LE(params->length);
1527 if (params->zbva) {
1528
1529
1530
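		/* For a zero-based MR the VA field carries the first byte offset (FBO) */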
1531 p_ramrod->va.hi = 0;
1532 p_ramrod->va.lo = cpu_to_le32(params->fbo);
1533 } else {
1534 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
1535 }
1536 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
1537
1538
1539 if (params->dif_enabled) {
1540 SET_FIELD(p_ramrod->flags2,
1541 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
1542 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
1543 params->dif_error_addr);
1544 }
1545
1546 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1547 if (rc)
1548 return rc;
1549
1550 if (fw_return_code != RDMA_RETURN_OK) {
1551 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1552 return -EINVAL;
1553 }
1554
1555 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
1556 return rc;
1557}
1558
1559static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
1560{
1561 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1562 struct rdma_deregister_tid_ramrod_data *p_ramrod;
1563 struct qed_sp_init_data init_data;
1564 struct qed_spq_entry *p_ent;
1565 struct qed_ptt *p_ptt;
1566 u8 fw_return_code;
1567 int rc;
1568
1569 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
1570
1571
1572 memset(&init_data, 0, sizeof(init_data));
1573 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1574 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1575
1576 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
1577 p_hwfn->p_rdma_info->proto, &init_data);
1578 if (rc) {
1579 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1580 return rc;
1581 }
1582
1583 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
1584 p_ramrod->itid = cpu_to_le32(itid);
1585
1586 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1587 if (rc) {
1588 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1589 return rc;
1590 }
1591
1592 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
1593 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1594 return -EINVAL;
1595 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
1596
1597
1598
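		/* The TID is still in use: drain the NIG before resending the ramrod */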
1599 p_ptt = qed_ptt_acquire(p_hwfn);
1600 if (!p_ptt) {
1601 rc = -EBUSY;
1602 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1603 "Failed to acquire PTT\n");
1604 return rc;
1605 }
1606
1607 rc = qed_mcp_drain(p_hwfn, p_ptt);
1608 if (rc) {
1609 qed_ptt_release(p_hwfn, p_ptt);
1610 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1611 "Drain failed\n");
1612 return rc;
1613 }
1614
1615 qed_ptt_release(p_hwfn, p_ptt);
1616
1617
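		/* Resend the deregister ramrod after the drain completes */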
1618 rc = qed_sp_init_request(p_hwfn, &p_ent,
1619 RDMA_RAMROD_DEREGISTER_MR,
1620 p_hwfn->p_rdma_info->proto,
1621 &init_data);
1622 if (rc) {
1623 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1624 "Failed to init sp-element\n");
1625 return rc;
1626 }
1627
1628 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1629 if (rc) {
1630 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1631 "Ramrod failed\n");
1632 return rc;
1633 }
1634
1635 if (fw_return_code != RDMA_RETURN_OK) {
1636 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
1637 fw_return_code);
1638 return rc;
1639 }
1640 }
1641
1642 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
1643 return rc;
1644}
1645
1646static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
1647{
1648 return QED_LEADING_HWFN(cdev);
1649}
1650
1651static int qed_rdma_modify_srq(void *rdma_cxt,
1652 struct qed_rdma_modify_srq_in_params *in_params)
1653{
1654 struct rdma_srq_modify_ramrod_data *p_ramrod;
1655 struct qed_sp_init_data init_data = {};
1656 struct qed_hwfn *p_hwfn = rdma_cxt;
1657 struct qed_spq_entry *p_ent;
1658 u16 opaque_fid;
1659 int rc;
1660
1661 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1662 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1663
1664 rc = qed_sp_init_request(p_hwfn, &p_ent,
1665 RDMA_RAMROD_MODIFY_SRQ,
1666 p_hwfn->p_rdma_info->proto, &init_data);
1667 if (rc)
1668 return rc;
1669
1670 p_ramrod = &p_ent->ramrod.rdma_modify_srq;
1671 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1672 opaque_fid = p_hwfn->hw_info.opaque_fid;
1673 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1674 p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
1675
1676 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1677 if (rc)
1678 return rc;
1679
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x\n",
1681 in_params->srq_id);
1682
1683 return rc;
1684}
1685
1686static int
1687qed_rdma_destroy_srq(void *rdma_cxt,
1688 struct qed_rdma_destroy_srq_in_params *in_params)
1689{
1690 struct rdma_srq_destroy_ramrod_data *p_ramrod;
1691 struct qed_sp_init_data init_data = {};
1692 struct qed_hwfn *p_hwfn = rdma_cxt;
1693 struct qed_spq_entry *p_ent;
1694 struct qed_bmap *bmap;
1695 u16 opaque_fid;
1696 int rc;
1697
1698 opaque_fid = p_hwfn->hw_info.opaque_fid;
1699
1700 init_data.opaque_fid = opaque_fid;
1701 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1702
1703 rc = qed_sp_init_request(p_hwfn, &p_ent,
1704 RDMA_RAMROD_DESTROY_SRQ,
1705 p_hwfn->p_rdma_info->proto, &init_data);
1706 if (rc)
1707 return rc;
1708
1709 p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
1710 p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
1711 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1712
1713 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1714 if (rc)
1715 return rc;
1716
1717 bmap = &p_hwfn->p_rdma_info->srq_map;
1718
1719 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1720 qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
1721 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1722
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x\n",
1724 in_params->srq_id);
1725
1726 return rc;
1727}
1728
1729static int
1730qed_rdma_create_srq(void *rdma_cxt,
1731 struct qed_rdma_create_srq_in_params *in_params,
1732 struct qed_rdma_create_srq_out_params *out_params)
1733{
1734 struct rdma_srq_create_ramrod_data *p_ramrod;
1735 struct qed_sp_init_data init_data = {};
1736 struct qed_hwfn *p_hwfn = rdma_cxt;
1737 enum qed_cxt_elem_type elem_type;
1738 struct qed_spq_entry *p_ent;
1739 u16 opaque_fid, srq_id;
1740 struct qed_bmap *bmap;
1741 u32 returned_id;
1742 int rc;
1743
1744 bmap = &p_hwfn->p_rdma_info->srq_map;
1745 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1746 rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
1747 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1748
1749 if (rc) {
1750 DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
1751 return rc;
1752 }
1753
1754 elem_type = QED_ELEM_SRQ;
1755 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
1756 if (rc)
1757 goto err;
1758
1759 srq_id = (u16)returned_id;
1760 opaque_fid = p_hwfn->hw_info.opaque_fid;
1763 init_data.opaque_fid = opaque_fid;
1764 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1765
1766 rc = qed_sp_init_request(p_hwfn, &p_ent,
1767 RDMA_RAMROD_CREATE_SRQ,
1768 p_hwfn->p_rdma_info->proto, &init_data);
1769 if (rc)
1770 goto err;
1771
1772 p_ramrod = &p_ent->ramrod.rdma_create_srq;
1773 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
1774 p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
1775 p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
1776 p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
1777 p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
1778 p_ramrod->page_size = cpu_to_le16(in_params->page_size);
1779 DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
1780
1781 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1782 if (rc)
1783 goto err;
1784
1785 out_params->srq_id = srq_id;
1786
1787 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1788 "SRQ created Id = %x\n", out_params->srq_id);
1789
1790 return rc;
1791
1792err:
1793 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1794 qed_bmap_release_id(p_hwfn, bmap, returned_id);
1795 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1796
1797 return rc;
1798}
1799
1800bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
1801{
1802 bool result;
1803
1804
1805 if (!p_hwfn->p_rdma_info)
1806 return false;
1807
1808 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1809 if (!p_hwfn->p_rdma_info->cid_map.bitmap)
1810 result = false;
1811 else
1812 result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
1813 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1814 return result;
1815}
1816
1817void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1818{
1819 u32 val;
1820
1821 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
1822
1823 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
1824 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
1825 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
1826 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
1827}
1828
1829
1830void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1831{
1832 p_hwfn->db_bar_no_edpm = true;
1833
1834 qed_rdma_dpm_conf(p_hwfn, p_ptt);
1835}
1836
1837static int qed_rdma_start(void *rdma_cxt,
1838 struct qed_rdma_start_in_params *params)
1839{
1840 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1841 struct qed_ptt *p_ptt;
1842 int rc = -EBUSY;
1843
1844 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1845 "desired_cnq = %08x\n", params->desired_cnq);
1846
1847 p_ptt = qed_ptt_acquire(p_hwfn);
1848 if (!p_ptt)
1849 goto err;
1850
1851 rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
1852 if (rc)
1853 goto err1;
1854
1855 rc = qed_rdma_setup(p_hwfn, p_ptt, params);
1856 if (rc)
1857 goto err2;
1858
1859 qed_ptt_release(p_hwfn, p_ptt);
1860
1861 return rc;
1862
1863err2:
1864 qed_rdma_free(p_hwfn);
1865err1:
1866 qed_ptt_release(p_hwfn, p_ptt);
1867err:
1868 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
1869 return rc;
1870}
1871
1872static int qed_rdma_init(struct qed_dev *cdev,
1873 struct qed_rdma_start_in_params *params)
1874{
1875 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
1876}
1877
1878static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
1879{
1880 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1881
1882 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
1883
1884 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1885 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
1886 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1887}
1888
1889static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
1890 u8 *old_mac_address,
1891 u8 *new_mac_address)
1892{
1893 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1894 struct qed_ptt *p_ptt;
1895 int rc = 0;
1896
1897 p_ptt = qed_ptt_acquire(p_hwfn);
1898 if (!p_ptt) {
1899 DP_ERR(cdev,
1900 "qed roce ll2 mac filter set: failed to acquire PTT\n");
1901 return -EINVAL;
1902 }
1903
1904 if (old_mac_address)
1905 qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
1906 if (new_mac_address)
1907 rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
1908
1909 qed_ptt_release(p_hwfn, p_ptt);
1910
1911 if (rc)
1912 DP_ERR(cdev,
1913 "qed roce ll2 mac filter set: failed to add MAC filter\n");
1914
1915 return rc;
1916}
1917
1918static const struct qed_rdma_ops qed_rdma_ops_pass = {
1919 .common = &qed_common_ops_pass,
1920 .fill_dev_info = &qed_fill_rdma_dev_info,
1921 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
1922 .rdma_init = &qed_rdma_init,
1923 .rdma_add_user = &qed_rdma_add_user,
1924 .rdma_remove_user = &qed_rdma_remove_user,
1925 .rdma_stop = &qed_rdma_stop,
1926 .rdma_query_port = &qed_rdma_query_port,
1927 .rdma_query_device = &qed_rdma_query_device,
1928 .rdma_get_start_sb = &qed_rdma_get_sb_start,
1929 .rdma_get_rdma_int = &qed_rdma_get_int,
1930 .rdma_set_rdma_int = &qed_rdma_set_int,
1931 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
1932 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
1933 .rdma_alloc_pd = &qed_rdma_alloc_pd,
1934 .rdma_dealloc_pd = &qed_rdma_free_pd,
1935 .rdma_create_cq = &qed_rdma_create_cq,
1936 .rdma_destroy_cq = &qed_rdma_destroy_cq,
1937 .rdma_create_qp = &qed_rdma_create_qp,
1938 .rdma_modify_qp = &qed_rdma_modify_qp,
1939 .rdma_query_qp = &qed_rdma_query_qp,
1940 .rdma_destroy_qp = &qed_rdma_destroy_qp,
1941 .rdma_alloc_tid = &qed_rdma_alloc_tid,
1942 .rdma_free_tid = &qed_rdma_free_tid,
1943 .rdma_register_tid = &qed_rdma_register_tid,
1944 .rdma_deregister_tid = &qed_rdma_deregister_tid,
1945 .rdma_create_srq = &qed_rdma_create_srq,
1946 .rdma_modify_srq = &qed_rdma_modify_srq,
1947 .rdma_destroy_srq = &qed_rdma_destroy_srq,
1948 .ll2_acquire_connection = &qed_ll2_acquire_connection,
1949 .ll2_establish_connection = &qed_ll2_establish_connection,
1950 .ll2_terminate_connection = &qed_ll2_terminate_connection,
1951 .ll2_release_connection = &qed_ll2_release_connection,
1952 .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
1953 .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
1954 .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
1955 .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
1956 .ll2_get_stats = &qed_ll2_get_stats,
1957 .iwarp_connect = &qed_iwarp_connect,
1958 .iwarp_create_listen = &qed_iwarp_create_listen,
1959 .iwarp_destroy_listen = &qed_iwarp_destroy_listen,
1960 .iwarp_accept = &qed_iwarp_accept,
1961 .iwarp_reject = &qed_iwarp_reject,
1962 .iwarp_send_rtr = &qed_iwarp_send_rtr,
1963};
1964
1965const struct qed_rdma_ops *qed_get_rdma_ops(void)
1966{
1967 return &qed_rdma_ops_pass;
1968}
1969EXPORT_SYMBOL(qed_get_rdma_ops);
1970