/* Broadcom NetXtreme-E RoCE driver.
 *
 * Description: QPLib resource manager
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
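
/* PBL - page buffer list handling */

/* Unmap and free the pages tracked by a PBL. Kernel-owned pages (allocated
 * with dma_alloc_coherent()) are returned to the DMA API; user pages
 * (is_umem == true) were only mapped, so only the tracking arrays go away.
 */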
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        struct pci_dev *pdev = res->pdev;
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                           pbl->pg_arr[i] &
                                                           PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "PBL free pg_arr[%d] empty?!\n", i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        vfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        vfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

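/* Record the DMA address of each user page in the PBL's map array; the
 * kernel virtual pointers stay NULL because user memory is never accessed
 * through them.
 */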
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
                                           struct bnxt_qplib_sg_info *sginfo)
{
        struct scatterlist *sghead = sginfo->sghead;
        struct sg_dma_page_iter sg_iter;
        int i = 0;

        for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
                pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
                pbl->pg_arr[i] = NULL;
                pbl->pg_count++;
                i++;
        }
}

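/* Allocate the tracking arrays for one PBL level and populate them, either
 * with freshly allocated coherent DMA pages (kernel queues) or from the
 * caller-supplied scatterlist (user queues). Returns 0 or -ENOMEM.
 */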
static int __alloc_pbl(struct bnxt_qplib_res *res,
                       struct bnxt_qplib_pbl *pbl,
                       struct bnxt_qplib_sg_info *sginfo)
{
        struct pci_dev *pdev = res->pdev;
        struct scatterlist *sghead;
        bool is_umem = false;
        u32 pages;
        int i;

        if (sginfo->nopte)
                return 0;
        pages = sginfo->npages;
        sghead = sginfo->sghead;

        pbl->pg_arr = vmalloc(pages * sizeof(void *));
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
        if (!pbl->pg_map_arr) {
                vfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = sginfo->pgsize;

        if (!sghead) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        pbl->pg_count++;
                }
        } else {
                is_umem = true;
                bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
        }

        return 0;
fail:
        __free_pbl(res, pbl, is_umem);
        return -ENOMEM;
}

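/* Tear down a hardware queue: free every PBL level and reset the
 * bookkeeping fields. Only the top level may reference user memory
 * (hwq->is_user); the indirection levels below it are always
 * kernel-allocated.
 */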
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(res, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(res, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

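/* Allocate and initialize a hardware queue. Depth and stride are rounded up
 * to powers of two, and 1, 2, or 3 levels of PBL indirection are built
 * depending on how many pages the queue needs:
 *   Level 0 - the queue pages themselves (or a PDE page for deeper trees)
 *   Level 1 - a PBL page of PTEs pointing at the queue pages
 *   Level 2 - PDE page -> PBL pages -> queue pages
 */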
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                              struct bnxt_qplib_hwq_attr *hwq_attr)
{
        u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
        struct bnxt_qplib_sg_info sginfo = {};
        u32 depth, stride, npbl, npde;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        struct scatterlist *sghead = NULL;
        struct bnxt_qplib_res *res;
        struct pci_dev *pdev;
        int i, rc, lvl;

        res = hwq_attr->res;
        pdev = res->pdev;
        sghead = hwq_attr->sginfo->sghead;
        pg_size = hwq_attr->sginfo->pgsize;
        hwq->level = PBL_LVL_MAX;

        depth = roundup_pow_of_two(hwq_attr->depth);
        stride = roundup_pow_of_two(hwq_attr->stride);
        if (hwq_attr->aux_depth) {
                aux_slots = hwq_attr->aux_depth;
                aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
                aux_pages = (aux_slots * aux_size) / pg_size;
                if ((aux_slots * aux_size) % pg_size)
                        aux_pages++;
        }

        if (!sghead) {
                hwq->is_user = false;
                npages = (depth * stride) / pg_size + aux_pages;
                if ((depth * stride) % pg_size)
                        npages++;
                if (!npages)
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
                hwq->is_user = true;
                /* Convert the umem page count from PAGE_SIZE units to the
                 * requested HW page size.
                 */
                npages = hwq_attr->sginfo->npages;
                npages = (npages * PAGE_SIZE) /
                          BIT_ULL(hwq_attr->sginfo->pgshft);
                if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
                     BIT_ULL(hwq_attr->sginfo->pgshft))
                        if (!npages)
                                npages++;
        }

        if (npages == MAX_PBL_LVL_0_PGS) {
                /* This request is Level 0, map PTE */
                rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
                if (rc)
                        goto fail;
                hwq->level = PBL_LVL_0;
        }

        if (npages > MAX_PBL_LVL_0_PGS) {
                if (npages > MAX_PBL_LVL_1_PGS) {
                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
                                    0 : PTU_PTE_VALID;

                        /* 2 levels of indirection */
                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
                                npbl++;
                        npde = npbl >> MAX_PDL_LVL_SHIFT;
                        if (npbl % BIT(MAX_PDL_LVL_SHIFT))
                                npde++;
                        /* Alloc PDE pages */
                        sginfo.pgsize = npde * pg_size;
                        sginfo.npages = 1;
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
                        if (rc)
                                goto fail;

                        /* Alloc PBL pages */
                        sginfo.npages = npbl;
                        sginfo.pgsize = PAGE_SIZE;
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
                        if (rc)
                                goto fail;
                        /* Fill PDL with PBL page pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        if (hwq_attr->type == HWQ_TYPE_MR) {
                        /* For MR it is expected that we supply only 1
                         * contiguous page i.e only 1 entry in the PDL that
                         * will contain all the PBLs for the user supplied
                         * memory region
                         */
                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
                                     i++)
                                        dst_virt_ptr[0][i] = src_phys_ptr[i] |
                                                flag;
                        } else {
                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
                                     i++)
                                        dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                                src_phys_ptr[i] |
                                                PTU_PDE_VALID;
                        }
                        /* Alloc or init PTEs */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
                                         hwq_attr->sginfo);
                        if (rc)
                                goto fail;
                        hwq->level = PBL_LVL_2;
                        if (hwq_attr->sginfo->nopte)
                                goto done;
                        /* Fill PBLs with PTE pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                        PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                } else {
                        /* 1 level of indirection */
                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
                                    0 : PTU_PTE_VALID;

                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
                                npbl++;
                        sginfo.npages = npbl;
                        sginfo.pgsize = PAGE_SIZE;
                        /* Alloc PBL page */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
                        if (rc)
                                goto fail;
                        /* Alloc or init PTEs */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
                                         hwq_attr->sginfo);
                        if (rc)
                                goto fail;
                        hwq->level = PBL_LVL_1;
                        if (hwq_attr->sginfo->nopte)
                                goto done;
                        /* Fill PBL with PTE pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                        PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                }
        }
done:
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->pdev = pdev;
        hwq->depth = hwq_attr->depth;
        hwq->max_elements = depth;
        hwq->element_size = stride;
        hwq->qe_ppg = pg_size / stride;
        /* For direct access to the elements */
        lvl = hwq->level;
        if (hwq_attr->sginfo->nopte && hwq->level)
                lvl = hwq->level - 1;
        hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
        spin_lock_init(&hwq->lock);

        return 0;
fail:
        bnxt_qplib_free_hwq(res, hwq);
        return -ENOMEM;
}

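/* Context Tables */

/* Free all firmware context tables (QPC, MRW, SRQC, CQ, TIM, TQM) that were
 * set up by bnxt_qplib_alloc_ctx().
 */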
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
        /* restore original pde level before destroy */
        ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
        bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
        bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

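/* Allocate the TQM PDE ring plus one ring per TQM allocation request; each
 * ring's depth scales with the number of QP contexts.
 */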
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_ctx *ctx)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        struct bnxt_qplib_tqm_ctx *tqmctx;
        int rc = 0;
        int i;

        tqmctx = &ctx->tqm_ctx;

        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.sginfo = &sginfo;
        hwq_attr.res = res;
        hwq_attr.type = HWQ_TYPE_CTX;
        hwq_attr.depth = 512;
        hwq_attr.stride = sizeof(u64);
        /* Alloc pdl buffer */
        rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
        if (rc)
                goto out;
        /* Save original pdl level */
        tqmctx->pde_level = tqmctx->pde.level;

        hwq_attr.stride = 1;
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!tqmctx->qcount[i])
                        continue;
                hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
                rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
                if (rc)
                        goto out;
        }
out:
        return rc;
}

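/* Point the TQM PDE entries at the per-request ring PBLs so the chip can
 * walk from the PDE page down to each ring's pages.
 */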
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
        struct bnxt_qplib_hwq *tbl;
        dma_addr_t *dma_ptr;
        __le64 **pbl_ptr, *ptr;
        int i, j, k;
        int fnz_idx = -1;
        int pg_count;

        pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                tbl = &ctx->qtbl[i];
                if (!tbl->max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i; /* first non-zero index */
                switch (tbl->level) {
                case PBL_LVL_2:
                        pg_count = tbl->pbl[PBL_LVL_1].pg_count;
                        for (k = 0; k < pg_count; k++) {
                                ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
                                dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
                                *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
                        }
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
                        *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
                                           PTU_PTE_VALID);
                        break;
                }
        }
        if (fnz_idx == -1)
                fnz_idx = 0;
        /* update pde level as per page table programming */
        ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
                          ctx->qtbl[fnz_idx].level + 1;
}

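/* Allocate the TQM rings and then wire their PBLs into the PDE page. */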
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_ctx *ctx)
{
        int rc = 0;

        rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
        if (rc)
                goto fail;

        bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
        return rc;
}

/*
 * bnxt_qplib_alloc_ctx - allocate the host memory that the chip firmware
 * uses for its context tables:
 *
 *     QPC tbl  - QP states
 *     MRW tbl  - memory regions and windows
 *     SRQC tbl - shared receive queue states
 *     CQ tbl   - completion queue states
 *     TQM      - Tx queue manager rings
 *     TIM      - timer contexts
 *
 * VFs and P5 devices do not need host-backed context memory, so for them
 * only the stats context is allocated.
 *
 * Returns 0 on success, else a -ERRNO.
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn, bool is_p5)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        int rc = 0;

        if (virt_fn || is_p5)
                goto stats_alloc;

        /* QPC Tables */
        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.sginfo = &sginfo;

        hwq_attr.res = res;
        hwq_attr.depth = ctx->qpc_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
        hwq_attr.type = HWQ_TYPE_CTX;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* MRW Tables */
        hwq_attr.depth = ctx->mrw_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* SRQ Tables */
        hwq_attr.depth = ctx->srqc_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* CQ Tables */
        hwq_attr.depth = ctx->cq_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* TQM Buffer */
        rc = bnxt_qplib_setup_tqm_rings(res, ctx);
        if (rc)
                goto fail;
        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        hwq_attr.depth = ctx->qpc_count * 16;
        hwq_attr.stride = 1;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
        if (rc)
                goto fail;
stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(res, ctx);
        return rc;
}

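/* GUID */

/* Derive a 64-bit EUI-64 GUID from the 48-bit MAC: flip the locally
 * administered bit of the first octet and splice 0xff, 0xfe into the middle.
 */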
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
        u8 mac[ETH_ALEN];

        /* MAC-48 to EUI-64 mapping */
        memcpy(mac, dev_addr, ETH_ALEN);
        guid[0] = mac[0] ^ 2;
        guid[1] = mac[1];
        guid[2] = mac[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac[3];
        guid[6] = mac[4];
        guid[7] = mac[5];
}

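/* SGIDs */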
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        kfree(sgid_tbl->vlan);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->vlan = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
        if (!sgid_tbl->vlan)
                goto out_free3;

        sgid_tbl->max = max;
        return 0;
out_free3:
        kfree(sgid_tbl->ctx);
        sgid_tbl->ctx = NULL;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
                                            sgid_tbl->tbl[i].vlan_id, true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        u32 i;

        for (i = 0; i < sgid_tbl->max; i++)
                sgid_tbl->tbl[i].vlan_id = 0xffff;

        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

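/* PKEYs */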
static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        if (!pkey_tbl->tbl)
                dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
        else
                kfree(pkey_tbl->tbl);

        pkey_tbl->tbl = NULL;
        pkey_tbl->max = 0;
        pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl,
                                     u16 max)
{
        pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!pkey_tbl->tbl)
                return -ENOMEM;

        pkey_tbl->max = max;
        return 0;
}

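/* PDs */

/* Hand out the lowest-numbered free PD. The bitmap tracks free PDs as set
 * bits, so allocation clears a bit and deallocation sets it again.
 */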
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        /* Round up so a max that is not a multiple of 8 still gets a full
         * bitmap byte.
         */
        bytes = DIV_ROUND_UP(max, 8);
        if (!bytes)
                bytes = 1;
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        /* All PDs start out free (bit set) */
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}

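/* DPIs */

/* Hand out a doorbell page index (DPI) and compute both the mapped and the
 * raw (unmapped) doorbell addresses for it.
 */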
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi *dpi,
                         void *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32 dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
                        dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        /* The doorbell area must be a whole number of pages */
        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
                                          dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl)
                goto unmap_io;

        bytes = DIV_ROUND_UP(dpit->max, 8);
        if (!bytes)
                bytes = 1;

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                goto unmap_io;
        }
        /* Mark all DPIs free */
        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;

unmap_io:
        pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        dpit->dbr_bar_reg_iomem = NULL;
        return -ENOMEM;
}

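/* PKEY state: cleanup zeroes the table; init re-adds the default PKEY. */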
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
        pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        u16 pkey = 0xFFFF;

        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

        /* pkey default = 0xFFFF */
        bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

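/* Stats */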
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        /* Stats buffer handed to firmware; size padded to a 128-byte
         * boundary.
         */
        stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "Stats DMA allocation failed\n");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
        bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

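/* Allocate every per-device resource table (SGID, PKEY, PD, DPI); on any
 * failure, everything allocated so far is unwound via bnxt_qplib_free_res().
 */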
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}