linux/drivers/infiniband/hw/bnxt_re/qplib_res.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);

/* PBL */
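/*
 * __free_pbl() releases one level of a page buffer list.  Kernel-owned
 * pages (is_umem == false) were obtained from dma_alloc_coherent() and
 * are returned to the DMA API; for user memory the pages belong to the
 * umem, so only the bookkeeping arrays are freed.
 */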
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                   pbl->pg_arr[i] &
                                                  PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "PBL free pg_arr[%d] empty?!\n", i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        kfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        kfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

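/*
 * __alloc_pbl() builds one level of a page buffer list.  With no
 * scatterlist (kernel-owned queues) it allocates 'pages' DMA-coherent
 * pages; with a scatterlist (user queues) it only records the DMA
 * addresses of the already-pinned umem pages.
 */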
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       struct scatterlist *sghead, u32 pages,
                       u32 nmaps, u32 pg_size)
{
        struct sg_dma_page_iter sg_iter;
        bool is_umem = false;
        int i;

        /* page ptr arrays */
        pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pbl->pg_map_arr) {
                kfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = pg_size;

        if (!sghead) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        pbl->pg_count++;
                }
        } else {
                i = 0;
                is_umem = true;
                for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
                        pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
                        pbl->pg_arr[i] = NULL;
                        pbl->pg_count++;
                        i++;
                }
        }

        return 0;

fail:
        __free_pbl(pdev, pbl, is_umem);
        return -ENOMEM;
}

/* HWQ */
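/*
 * bnxt_qplib_free_hwq() tears down a hardware queue.  The leaf PBL
 * level may reference user memory (hwq->is_user); the intermediate
 * levels are always kernel-allocated.
 */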
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(pdev, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
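/*
 * bnxt_qplib_alloc_init_hwq() rounds *elements up to a power of two,
 * sizes the queue (plus optional 'aux' bytes per slot), and builds up
 * to three PBL levels of indirection depending on the resulting page
 * count.  On success, *elements is updated to the rounded slot count.
 */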
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                              struct bnxt_qplib_sg_info *sg_info,
                              u32 *elements, u32 element_size, u32 aux,
                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
        u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        struct scatterlist *sghead = NULL;
        int i, rc;

        hwq->level = PBL_LVL_MAX;

        slots = roundup_pow_of_two(*elements);
        if (aux) {
                aux_size = roundup_pow_of_two(aux);
                aux_pages = (slots * aux_size) / pg_size;
                if ((slots * aux_size) % pg_size)
                        aux_pages++;
        }
        size = roundup_pow_of_two(element_size);

        if (sg_info)
                sghead = sg_info->sglist;

        if (!sghead) {
                hwq->is_user = false;
                pages = (slots * size) / pg_size + aux_pages;
                if ((slots * size) % pg_size)
                        pages++;
                if (!pages)
                        return -EINVAL;
                maps = 0;
        } else {
                hwq->is_user = true;
                pages = sg_info->npages;
                maps = sg_info->nmap;
        }

        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
                                 pages, maps, pg_size);
        else
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
                                 1, 0, pg_size);
        if (rc)
                goto fail;

        hwq->level = PBL_LVL_0;

        if (pages > MAX_PBL_LVL_0_PGS) {
                if (pages > MAX_PBL_LVL_1_PGS) {
                        /* 2 levels of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
                                         MAX_PBL_LVL_1_PGS_FOR_LVL_2,
                                         0, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PDE_VALID;
                        hwq->level = PBL_LVL_1;

                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
                                         pages, maps, pg_size);
                        if (rc)
                                goto fail;

                        /* Fill in lvl1 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_2;
                } else {
                        u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
                                                PTU_PTE_VALID;

                        /* 1 level of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
                                         pages, maps, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_1;
                }
        }
        hwq->pdev = pdev;
        spin_lock_init(&hwq->lock);
        hwq->prod = 0;
        hwq->cons = 0;
        *elements = hwq->max_elements = slots;
        hwq->element_size = size;

        /* For direct access to the elements */
        hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

        return 0;

fail:
        bnxt_qplib_free_hwq(pdev, hwq);
        return -ENOMEM;
}

/* Context Tables */
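/* Release every firmware context table, then the stats context. */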
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
        bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
        bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Page Buffer List or a Page Directory List with one or two stages of
 *     indirection plus the PBL is used.
 *     The levels of indirection are employed as follows:
 *             0 levels when 0         < ctx size <= 1 page
 *             1 level  when 1 page    < ctx size <= 512 pages
 *             2 levels when 512 pages < ctx size <= MAX
 * Returns:
 *     0 if success, else -ERRORS
 */
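/*
 * Note: the caller is expected to populate ctx->qpc_count, ctx->mrw_count,
 * etc. before calling.  For virtual functions and P5 chips everything but
 * the stats context is skipped (see the 'stats_alloc' shortcut below).
 */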
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn, bool is_p5)
{
        int i, j, k, rc = 0;
        int fnz_idx = -1;
        __le64 **pbl_ptr;

        if (virt_fn || is_p5)
                goto stats_alloc;

        /* QPC Tables */
        ctx->qpc_tbl.max_elements = ctx->qpc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
                                       &ctx->qpc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* MRW Tables */
        ctx->mrw_tbl.max_elements = ctx->mrw_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
                                       &ctx->mrw_tbl.max_elements,
                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* SRQ Tables */
        ctx->srqc_tbl.max_elements = ctx->srqc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
                                       &ctx->srqc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* CQ Tables */
        ctx->cq_tbl.max_elements = ctx->cq_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
                                       &ctx->cq_tbl.max_elements,
                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* TQM Buffer */
        ctx->tqm_pde.max_elements = 512;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
                                       &ctx->tqm_pde.max_elements, sizeof(u64),
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!ctx->tqm_count[i])
                        continue;
                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
                                               ctx->tqm_count[i];
                rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
                                               &ctx->tqm_tbl[i].max_elements, 1,
                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
                if (rc)
                        goto fail;
        }
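        /*
         * Wire each non-empty TQM table into the TQM PDE page: a level-2
         * table contributes its level-1 PBL pages; level-0/1 tables
         * contribute their level-0 base page.
         */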
        pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                if (!ctx->tqm_tbl[i].max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i;
                switch (ctx->tqm_tbl[i].level) {
                case PBL_LVL_2:
                        for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
                             k++)
                                pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
                                  cpu_to_le64(
                                    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
                                    | PTU_PTE_VALID);
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
                                ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
                                PTU_PTE_VALID);
                        break;
                }
        }
        if (fnz_idx == -1)
                fnz_idx = 0;
        ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
                             PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
                                       &ctx->tim_tbl.max_elements, 1,
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(pdev, ctx);
        return rc;
}

/* GUID */
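/*
 * bnxt_qplib_get_guid() derives a 64-bit GUID from the 48-bit MAC using
 * the standard MAC-48 to EUI-64 expansion: flip the universal/local bit
 * of the first octet and insert 0xFF, 0xFE in the middle.
 */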
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
        u8 mac[ETH_ALEN];

        /* MAC-48 to EUI-64 mapping */
        memcpy(mac, dev_addr, ETH_ALEN);
        guid[0] = mac[0] ^ 2;
        guid[1] = mac[1];
        guid[2] = mac[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac[3];
        guid[6] = mac[4];
        guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        kfree(sgid_tbl->vlan);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->vlan = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

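/*
 * bnxt_qplib_alloc_sgid_tbl() sizes four parallel arrays (GID entries,
 * hardware IDs, per-entry context pointers, and VLAN flags) for 'max'
 * SGID slots.
 */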
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
        if (!sgid_tbl->vlan)
                goto out_free3;

        sgid_tbl->max = max;
        return 0;
out_free3:
        kfree(sgid_tbl->ctx);
        sgid_tbl->ctx = NULL;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
                                            sgid_tbl->tbl[i].vlan_id, true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        u32 i;

        for (i = 0; i < sgid_tbl->max; i++)
                sgid_tbl->tbl[i].vlan_id = 0xffff;

        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        if (!pkey_tbl->tbl)
                dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
        else
                kfree(pkey_tbl->tbl);

        pkey_tbl->tbl = NULL;
        pkey_tbl->max = 0;
        pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl,
                                     u16 max)
{
        pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!pkey_tbl->tbl)
                return -ENOMEM;

        pkey_tbl->max = max;
        return 0;
}

/* PDs */
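/*
 * PD and DPI ids are tracked in bitmaps where a set bit means "free":
 * allocation finds and clears the first set bit, deallocation sets it
 * again (test_and_set_bit also catching double frees).
 */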
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        bytes = max >> 3;
        if (!bytes)
                bytes = 1;
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}

/* DPIs */
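/*
 * Each DPI owns one PAGE_SIZE doorbell page in the DBR BAR: 'dbr' is
 * the kernel ioremap'ed address and 'umdbr' the corresponding unmapped
 * bus address.
 */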
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi     *dpi,
                         void                      *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi     *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

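/*
 * bnxt_qplib_alloc_dpi_tbl() maps the doorbell BAR past 'dbr_offset'
 * and carves it into PAGE_SIZE doorbell pages, with a free-bitmap and
 * an app-context array sized to match.
 */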
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32                       dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
                        dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
                                                  dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl)
                goto unmap_io;

        bytes = dpit->max >> 3;
        if (!bytes)
                bytes = 1;

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                goto unmap_io;
        }

        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;

unmap_io:
        pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        /* Clear the stale pointer so a later retry is not misread as
         * "already mapped" by the -EALREADY check above.
         */
        dpit->dbr_bar_reg_iomem = NULL;
        return -ENOMEM;
}

/* PKEYs */
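/* Cleanup clears the pkey table; init re-seeds it with the default 0xFFFF pkey. */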
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
        pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        u16 pkey = 0xFFFF;

        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

        /* pkey default = 0xFFFF */
        bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
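/*
 * The stats context is a single DMA-coherent buffer the hardware writes
 * counters into; fw_id stays -1 while no firmware id is assigned.
 */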
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        /* 128 byte aligned context memory is required only for 57500.
         * It is applied unconditionally here since it does not harm
         * earlier generations.
         */
        stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "Stats DMA allocation failed\n");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
        bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

        res->netdev = NULL;
        res->pdev = NULL;
}

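/*
 * bnxt_qplib_alloc_res() allocates the SGID, pkey, PD and DPI tables in
 * order; any failure unwinds everything already allocated via
 * bnxt_qplib_free_res().
 */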
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}