/* Broadcom NetXtreme-E RoCE driver: QPLib resource manager */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	/* Kernel-owned pages were DMA-allocated here; user (umem) pages
	 * belong to the ib_umem and must not be freed by us.
	 */
	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
	struct scatterlist *sg;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			/* dma_alloc_coherent() returns zeroed memory; the
			 * deprecated dma_zalloc_coherent() wrapper is gone.
			 */
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		i = 0;
		is_umem = true;
		for_each_sg(sghead, sg, pages, i) {
			pbl->pg_map_arr[i] = sg_dma_address(sg);
			pbl->pg_arr[i] = sg_virt(sg);
			if (!pbl->pg_arr[i])
				goto fail;

			pbl->pg_count++;
		}
	}

	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(pdev, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}
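
/*
 * A HWQ is described to the chip through up to three PBL (page buffer
 * list) levels: PBL_LVL_0 points directly at the queue pages, PBL_LVL_1
 * adds one page of PTEs in between, and PBL_LVL_2 adds a page of PDEs on
 * top of that.  bnxt_qplib_alloc_init_hwq() below selects the shallowest
 * level that can map the requested number of pages.
 */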

/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
			      struct scatterlist *sghead, int nmap,
			      u32 *elements, u32 element_size, u32 aux,
			      u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
	u32 pages, slots, size, aux_pages = 0, aux_size = 0;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	int i, rc;

	hwq->level = PBL_LVL_MAX;

	slots = roundup_pow_of_two(*elements);
	if (aux) {
		aux_size = roundup_pow_of_two(aux);
		aux_pages = (slots * aux_size) / pg_size;
		if ((slots * aux_size) % pg_size)
			aux_pages++;
	}
	size = roundup_pow_of_two(element_size);

	if (!sghead) {
		hwq->is_user = false;
		pages = (slots * size) / pg_size + aux_pages;
		if ((slots * size) % pg_size)
			pages++;
		if (!pages)
			return -EINVAL;
	} else {
		hwq->is_user = true;
		pages = nmap;
	}

	/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
	if (sghead && (pages == MAX_PBL_LVL_0_PGS))
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
				 pages, pg_size);
	else
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
	if (rc)
		goto fail;

	hwq->level = PBL_LVL_0;

	if (pages > MAX_PBL_LVL_0_PGS) {
		if (pages > MAX_PBL_LVL_1_PGS) {
			/* 2 levels of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
					 MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PDE_VALID;
			hwq->level = PBL_LVL_1;

			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;

			/* Fill in lvl1 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_2;
		} else {
			u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
						PTU_PTE_VALID;

			/* 1 level of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_1;
		}
	}
	hwq->pdev = pdev;
	spin_lock_init(&hwq->lock);
	hwq->prod = 0;
	hwq->cons = 0;
	*elements = hwq->max_elements = slots;
	hwq->element_size = size;

	/* For direct access to the elements */
	hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

	return 0;

fail:
	bnxt_qplib_free_hwq(pdev, hwq);
	return -ENOMEM;
}
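
/*
 * Usage sketch (hypothetical caller, for illustration only): to allocate
 * a kernel-resident queue of at least 1024 elements of 64 bytes each:
 *
 *	u32 depth = 1024;
 *	int rc;
 *
 *	rc = bnxt_qplib_alloc_init_hwq(pdev, &hwq, NULL, 0, &depth, 64,
 *				       0, PAGE_SIZE, HWQ_TYPE_QUEUE);
 *
 * On success, depth holds the (power of two) element count actually
 * allocated, and hwq.pbl_ptr points at the page array of the deepest
 * PBL level.
 */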

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
	bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
	bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * bnxt_qplib_alloc_ctx - allocate the host-resident context tables
 * consumed by the chip firmware: QPC, MRW, SRQC, CQ, TQM and TIM, plus
 * the stats context.  Each table is backed by a HWQ sized from the
 * corresponding count in @ctx.  On VFs and P5 devices only the stats
 * context is allocated here.
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	int i, j, k, rc = 0;
	int fnz_idx = -1;
	__le64 **pbl_ptr;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	ctx->qpc_tbl.max_elements = ctx->qpc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
				       &ctx->qpc_tbl.max_elements,
				       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* MRW Tables */
	ctx->mrw_tbl.max_elements = ctx->mrw_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
				       &ctx->mrw_tbl.max_elements,
				       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* SRQ Tables */
	ctx->srqc_tbl.max_elements = ctx->srqc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
				       &ctx->srqc_tbl.max_elements,
				       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* CQ Tables */
	ctx->cq_tbl.max_elements = ctx->cq_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
				       &ctx->cq_tbl.max_elements,
				       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* TQM Buffer */
	ctx->tqm_pde.max_elements = 512;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
				       &ctx->tqm_pde.max_elements, sizeof(u64),
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!ctx->tqm_count[i])
			continue;
		ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
					       ctx->tqm_count[i];
		rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
					       &ctx->tqm_tbl[i].max_elements, 1,
					       0, PAGE_SIZE, HWQ_TYPE_CTX);
		if (rc)
			goto fail;
	}
	pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		if (!ctx->tqm_tbl[i].max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i;	/* first non-zero index */
		switch (ctx->tqm_tbl[i].level) {
		case PBL_LVL_2:
			for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
			     k++)
				pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
				  cpu_to_le64(
				    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
				    | PTU_PTE_VALID);
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
				ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
				PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
			     PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
				       &ctx->tim_tbl.max_elements, 1,
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(pdev, ctx);
	return rc;
}

/* GUID */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}
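
/*
 * For example, the MAC address 00:10:18:a0:b0:c0 maps to the GUID
 * 02:10:18:ff:fe:a0:b0:c0 - the universal/local bit of the first octet
 * is flipped and ff:fe is inserted in the middle, the standard MAC-48
 * to EUI-64 expansion.
 */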

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	/* Round up, not down: "max >> 3" would under-size the bitmap
	 * whenever max is not a multiple of 8.
	 */
	bytes = DIV_ROUND_UP(max, 8);
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* Mark all PD ids as free */
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}
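
/*
 * Note the inverted bitmap convention used by the PD and DPI tables:
 * a set bit means "id free".  Allocation is therefore find_first_bit()
 * plus clear_bit(), and freeing is test_and_set_bit(), which doubles as
 * a double-free check.
 */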

/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}
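
/*
 * dpi->dbr is the kernel mapping of the doorbell page, while dpi->umdbr
 * keeps the raw BAR address of the same page so that it can later be
 * handed to user space for a direct doorbell mapping.
 */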

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	/* ioremap() is uncached for MMIO; the deprecated
	 * ioremap_nocache() alias is no longer needed.
	 */
	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = DIV_ROUND_UP(dpit->max, 8);
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	/* Mark all DPIs as free */
	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	/* 128-byte aligned context memory is required only for 57500,
	 * but making it unconditional does not harm earlier generations.
	 */
	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}
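
/*
 * The stats context is a single coherent DMA buffer that the firmware
 * fills with per-function counters; fw_id is initialized to -1 until
 * the context is registered with the firmware.
 */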

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

	res->netdev = NULL;
	res->pdev = NULL;
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}