/* Broadcom NetXtreme-E RoCE driver: QPLib resource manager */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

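/*
 * Free one PBL level. For kernel-owned PBLs every coherent DMA page is
 * returned to the DMA API; for umem-backed PBLs (is_umem) the pages belong
 * to the user memory region, so only the bookkeeping arrays are freed.
 */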
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

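/* Record the DMA address of each user-memory block in the PBL. */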
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

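/*
 * Allocate one PBL level: the bookkeeping arrays plus, for kernel queues,
 * one coherent DMA page per entry. For user queues the DMA addresses come
 * from the umem instead of fresh allocations.
 */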
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;

	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

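/* Free all PBL levels of a HW queue and reset its bookkeeping. */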
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

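/*
 * Allocate and initialize a HW queue, building its page-buffer-list (PBL)
 * hierarchy: a queue that fits in one page is mapped at PBL_LVL_0; larger
 * queues add one or two levels of indirection (PDE -> PBL -> PTE, up to
 * PBL_LVL_2). Kernel queues are backed by coherent DMA pages; user queues
 * reuse the DMA blocks of the supplied umem.
 *
 * Illustrative call (a sketch, not copied from a caller in this file; the
 * depth and stride values are hypothetical):
 *
 *	struct bnxt_qplib_hwq_attr hwq_attr = {};
 *	struct bnxt_qplib_sg_info sginfo = {};
 *
 *	sginfo.pgsize = PAGE_SIZE;
 *	sginfo.pgshft = PAGE_SHIFT;
 *	hwq_attr.res = res;
 *	hwq_attr.sginfo = &sginfo;
 *	hwq_attr.type = HWQ_TYPE_QUEUE;
 *	hwq_attr.depth = 1024;
 *	hwq_attr.stride = 64;
 *	rc = bnxt_qplib_alloc_init_hwq(&hwq, &hwq_attr);
 */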
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);

		hwq->is_user = true;
		npages = sginfo_num_pages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((sginfo_num_pages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			if (!npages)
				npages++;
	}

	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
		goto done;
	}

	if (npages >= MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;

			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
			/* For MR it is expected that we supply only one
			 * contiguous page, i.e. only one entry in the PDL
			 * that will contain all the PBLs for the user
			 * supplied memory region
			 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}

			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* npages <= MAX_PBL_LVL_1_PGS */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

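/* Context Tables: free all firmware context backing stores. */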
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* Restore the original PDE level before destroying it */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

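/* Allocate the TQM PDE ring plus one queue per TQM allocation request. */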
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc PDE buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save the original PDE level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

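/*
 * Map the TQM queue page tables into the PDE ring: each queue owns a
 * MAX_TQM_ALLOC_BLK_SIZE slice of PDE entries pointing at its PBL pages
 * (level 2) or its base page (level 0/1).
 */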
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* Update the PDE level as per the page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

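/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *	Context tables are memories which are used by the chip fw.
 *	The tables allocated here are:
 *		QPC ctx - holds QP states
 *		MRW ctx - holds memory region and window
 *		SRQ ctx - holds shared RQ states
 *		CQ ctx  - holds completion queue states
 *		TQM ctx - holds Tx Queue Manager context
 *		TIM ctx - holds timer context
 *	Depending on the size of the table requested, one or more pages are
 *	used to fetch the context pointer; in that case the hwq ends up at
 *	PBL_LVL_1 or PBL_LVL_2. VFs and P5 chips do not require host context
 *	memory and only allocate the stats context.
 * Returns:
 *	0 if success, else -ERRNO
 */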
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

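/* PDs: the PD table is a bitmap in which a set bit means the id is free. */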
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* All bits set: every PD id starts out free */
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

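/* DPIs: doorbell pages handed out from the DBR BAR, tracked by bitmap. */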
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %u\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	/* All bits set: every DPI starts out free */
	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

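/* Stats: per-function statistics context, DMA-mapped for the firmware. */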
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

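/*
 * Enable 32- and 64-bit PCIe AtomicOps toward the root port and confirm
 * that the AtomicOp requester enable bit actually stuck. Returns 0 when
 * atomics are usable, non-zero otherwise.
 */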
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}