linux/drivers/infiniband/hw/bnxt_re/qplib_res.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                   pbl->pg_arr[i] &
                                                  PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "QPLIB: PBL free pg_arr[%d] empty?!",
                                         i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        kfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        kfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
        struct scatterlist *sg;
        bool is_umem = false;
        int i;

        /* page ptr arrays */
        pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pbl->pg_map_arr) {
                kfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = pg_size;

        if (!sghead) {
                /* Kernel-owned queue: back the PBL with zeroed,
                 * DMA-coherent pages.
                 */
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        memset(pbl->pg_arr[i], 0, pbl->pg_size);
                        pbl->pg_count++;
                }
        } else {
                /* User memory: pages are already pinned and DMA-mapped,
                 * so only record their addresses here.
                 */
                i = 0;
                is_umem = true;
                for_each_sg(sghead, sg, pages, i) {
                        pbl->pg_map_arr[i] = sg_dma_address(sg);
                        pbl->pg_arr[i] = sg_virt(sg);
                        if (!pbl->pg_arr[i])
                                goto fail;

                        pbl->pg_count++;
                }
        }

        return 0;

fail:
        __free_pbl(pdev, pbl, is_umem);
        return -ENOMEM;
}
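
/*
 * Illustrative sketch, not part of the driver: a caller allocating a
 * three-page kernel-owned PBL might look like the following ("my_pbl"
 * and the page count are hypothetical values):
 *
 *	struct bnxt_qplib_pbl my_pbl = {};
 *
 *	if (__alloc_pbl(pdev, &my_pbl, NULL, 3, PAGE_SIZE))
 *		return -ENOMEM;
 *	...
 *	__free_pbl(pdev, &my_pbl, false);
 */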

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(pdev, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                              struct scatterlist *sghead, int nmap,
                              u32 *elements, u32 element_size, u32 aux,
                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
        u32 pages, slots, size, aux_pages = 0, aux_size = 0;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        int i, rc;

        hwq->level = PBL_LVL_MAX;

        slots = roundup_pow_of_two(*elements);
        if (aux) {
                aux_size = roundup_pow_of_two(aux);
                aux_pages = (slots * aux_size) / pg_size;
                if ((slots * aux_size) % pg_size)
                        aux_pages++;
        }
        size = roundup_pow_of_two(element_size);

        if (!sghead) {
                hwq->is_user = false;
                pages = (slots * size) / pg_size + aux_pages;
                if ((slots * size) % pg_size)
                        pages++;
                if (!pages)
                        return -EINVAL;
        } else {
                hwq->is_user = true;
                pages = nmap;
        }

        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
                                 pages, pg_size);
        else
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
        if (rc)
                goto fail;

        hwq->level = PBL_LVL_0;

        if (pages > MAX_PBL_LVL_0_PGS) {
                if (pages > MAX_PBL_LVL_1_PGS) {
                        /* 2 levels of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
                                         MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PDE_VALID;
                        hwq->level = PBL_LVL_1;

                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;

                        /* Fill in lvl1 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_2;
                } else {
                        u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
                                                PTU_PTE_VALID;

                        /* 1 level of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_1;
                }
        }
        hwq->pdev = pdev;
        spin_lock_init(&hwq->lock);
        hwq->prod = 0;
        hwq->cons = 0;
        *elements = hwq->max_elements = slots;
        hwq->element_size = size;

        /* For direct access to the elements */
        hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

        return 0;

fail:
        bnxt_qplib_free_hwq(pdev, hwq);
        return -ENOMEM;
}
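
/*
 * Worked example of the level selection above (illustrative only,
 * assuming 4 KiB pages and 8-byte PBL entries, so one page can index
 * 512 pages): a queue needing 256 data pages exceeds MAX_PBL_LVL_0_PGS
 * but fits under a single level of indirection, so PBL_LVL_0 becomes
 * one page of PTEs pointing at the 256 PBL_LVL_1 data pages. A queue
 * needing more pages than one PTE page can index exceeds
 * MAX_PBL_LVL_1_PGS, and the two-level PDE -> PTE -> data-page tree is
 * built instead.
 */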

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
        bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
        bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories used by the chip firmware.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a 1-page Buffer
 *     List or a 1- or 2-stage indirection Page Directory List plus 1 PBL
 *     is used.
 *     The tables are employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 levels of indirection
 *             For 1 PAGE < ctx size <= 512 entries, 1 level of indirection
 *             For 512    < ctx size <= MAX, 2 levels of indirection
 * Returns:
 *     0 on success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn)
{
        int i, j, k, rc = 0;
        int fnz_idx = -1;
        __le64 **pbl_ptr;

        if (virt_fn)
                goto stats_alloc;

        /* QPC Tables */
        ctx->qpc_tbl.max_elements = ctx->qpc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
                                       &ctx->qpc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* MRW Tables */
        ctx->mrw_tbl.max_elements = ctx->mrw_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
                                       &ctx->mrw_tbl.max_elements,
                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* SRQ Tables */
        ctx->srqc_tbl.max_elements = ctx->srqc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
                                       &ctx->srqc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* CQ Tables */
        ctx->cq_tbl.max_elements = ctx->cq_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
                                       &ctx->cq_tbl.max_elements,
                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* TQM Buffer */
        ctx->tqm_pde.max_elements = 512;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
                                       &ctx->tqm_pde.max_elements, sizeof(u64),
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!ctx->tqm_count[i])
                        continue;
                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
                                               ctx->tqm_count[i];
                rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
                                               &ctx->tqm_tbl[i].max_elements, 1,
                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
                if (rc)
                        goto fail;
        }
        pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                if (!ctx->tqm_tbl[i].max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i;
                switch (ctx->tqm_tbl[i].level) {
                case PBL_LVL_2:
                        for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
                             k++)
                                pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
                                  cpu_to_le64(
                                    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
                                    | PTU_PTE_VALID);
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
                                ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
                                PTU_PTE_VALID);
                        break;
                }
        }
        if (fnz_idx == -1)
                fnz_idx = 0;
        ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
                             PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
                                       &ctx->tim_tbl.max_elements, 1,
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(pdev, ctx);
        return rc;
}
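
/*
 * Illustrative note on the TQM PDE fill above: each TQM ring i owns a
 * fixed window of MAX_TQM_ALLOC_BLK_SIZE entries in the tqm_pde page,
 * starting at j = i * MAX_TQM_ALLOC_BLK_SIZE. A two-level ring publishes
 * all of its PBL_LVL_1 PTE-page addresses into that window; a smaller
 * ring publishes only its single PBL_LVL_0 page. With hypothetical
 * rings 0 and 2 populated, the PDE layout would be:
 *
 *	entry 0 * MAX_TQM_ALLOC_BLK_SIZE	-> ring 0 PBL page(s)
 *	entry 2 * MAX_TQM_ALLOC_BLK_SIZE	-> ring 2 PBL page(s)
 *
 * with every published address tagged PTU_PTE_VALID.
 */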

/* GUID */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
        u8 mac[ETH_ALEN];

        /* MAC-48 to EUI-64 mapping */
        memcpy(mac, dev_addr, ETH_ALEN);
        guid[0] = mac[0] ^ 2;   /* flip the universal/local bit */
        guid[1] = mac[1];
        guid[2] = mac[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac[3];
        guid[6] = mac[4];
        guid[7] = mac[5];
}
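
/*
 * Worked example of the mapping above: MAC 00:11:22:33:44:55 yields the
 * EUI-64 GUID 02:11:22:ff:fe:33:44:55 - the fixed ff:fe pair is inserted
 * between the OUI and the device-specific octets, and the
 * universal/local bit of the first octet is inverted.
 */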

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        kfree(sgid_tbl->vlan);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->vlan = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
        if (!sgid_tbl->vlan)
                goto out_free3;

        sgid_tbl->max = max;
        return 0;
out_free3:
        kfree(sgid_tbl->ctx);
        sgid_tbl->ctx = NULL;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        if (!pkey_tbl->tbl)
                dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
        else
                kfree(pkey_tbl->tbl);

        pkey_tbl->tbl = NULL;
        pkey_tbl->max = 0;
        pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl,
                                     u16 max)
{
        pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!pkey_tbl->tbl)
                return -ENOMEM;

        pkey_tbl->max = max;
        return 0;
}

/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        /* The bit helpers (find_first_bit et al.) read the table in
         * word-sized chunks, so size the bitmap in whole unsigned longs
         * to cover all "max" bits without overreading the allocation.
         */
        bytes = BITS_TO_LONGS(max) * sizeof(unsigned long);
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        /* A set bit marks a free PD id */
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}
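
/*
 * Illustrative usage (hypothetical caller, not part of this file): the
 * PD table is a plain bitmap where a set bit means "id free", so
 * allocation is find-and-clear and deallocation is test-and-set:
 *
 *	struct bnxt_qplib_pd pd;
 *
 *	if (!bnxt_qplib_alloc_pd(&res->pd_tbl, &pd))
 *		...			// pd.id now holds a unique PD id
 *	bnxt_qplib_dealloc_pd(res, &res->pd_tbl, &pd);
 */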

/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi     *dpi,
                         void                      *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi     *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32                       dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev,
                        "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
                return -ENOMEM;
        }

        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
                        dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
                                                  dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "QPLIB: FP: DBR BAR region %d mapping failed",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl) {
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
                dev_err(&res->pdev->dev,
                        "QPLIB: DPI app tbl allocation failed");
                return -ENOMEM;
        }

        /* Size the free-DPI bitmap in whole words, as for the PD table */
        bytes = BITS_TO_LONGS(dpit->max) * sizeof(unsigned long);

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                dev_err(&res->pdev->dev,
                        "QPLIB: DPI tbl allocation failed for size = %d",
                        bytes);
                return -ENOMEM;
        }

        /* All DPIs start out free */
        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;
}
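
/*
 * Illustrative layout (values hypothetical): with a 64 KiB doorbell
 * region and 4 KiB pages, dpit->max is 16, and DPI n maps to the
 * page-sized doorbell window at dbr_bar_reg_iomem + n * PAGE_SIZE,
 * with unmapped_dbr + n * PAGE_SIZE as the corresponding bus address
 * handed out for user-space mappings.
 */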

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
        pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        u16 pkey = 0xFFFF;

        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

        /* pkey default = 0xFFFF */
        bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        stats->size = sizeof(struct ctx_hw_stats);
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
        bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

        res->netdev = NULL;
        res->pdev = NULL;
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}