uboot/drivers/net/octeontx2/nix_af.c
// SPDX-License-Identifier:    GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <watchdog.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-npc.h>
#include <asm/arch/csrs/csrs-lmt.h>
#include <asm/io.h>

#include "nix.h"
#include "lmt.h"
#include "cgx.h"

static struct nix_aq_cq_dis cq_dis ALIGNED;
static struct nix_aq_rq_dis rq_dis ALIGNED;
static struct nix_aq_sq_dis sq_dis ALIGNED;

/***************
 * NPA API
 ***************/
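/**
 * Initialize an aura context through the NPA admin queue
 *
 * An INIT instruction is written at the current AQ head, the caller's
 * aura descriptor is staged in the result area, and the doorbell is
 * rung. The completion code is then polled for up to 1000 ms.
 *
 * @param nix_af   NIX AF handle
 * @param lf       NPA logical function number
 * @param desc     Aura context to program
 * @param aura_id  Index of the aura to initialize
 *
 * @return 0 on success, -1 on timeout or bad completion code
 */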
int npa_attach_aura(struct nix_af *nix_af, int lf,
		    const union npa_aura_s *desc, u32 aura_id)
{
	struct npa_af *npa = nix_af->npa_af;
	union npa_aq_inst_s *inst;
	union npa_aq_res_s *res;
	union npa_af_aq_status aq_stat;
	union npa_aura_s *context;
	u64 head;
	ulong start;

	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, aura_id);
	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
	head = aq_stat.s.head_ptr;
	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
	res = (union npa_aq_res_s *)(npa->aq.res.base);

	memset(inst, 0, sizeof(*inst));
	inst->s.lf = lf;
	inst->s.doneint = 0;
	inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
	inst->s.op = NPA_AQ_INSTOP_E_INIT;
	inst->s.res_addr = npa->aq.res.iova;
	inst->s.cindex = aura_id;

	context = (union npa_aura_s *)(npa->aq.res.base +
						CONFIG_SYS_CACHELINE_SIZE);
	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
	memcpy(context, desc, sizeof(union npa_aura_s));
	__iowmb();
	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);

	start = get_timer(0);
	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
	       (get_timer(start) < 1000))
		WATCHDOG_RESET();
	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
		printf("%s: Error: result 0x%x not good\n",
		       __func__, res->s.compcode);
		return -1;
	}

	return 0;
}

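/**
 * Initialize a pool context through the NPA admin queue
 *
 * Same flow as npa_attach_aura(), but the instruction carries the
 * POOL context type and the caller's pool descriptor.
 *
 * @param nix_af   NIX AF handle
 * @param lf       NPA logical function number
 * @param desc     Pool context to program
 * @param pool_id  Index of the pool to initialize
 *
 * @return 0 on success, -1 on timeout or bad completion code
 */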
int npa_attach_pool(struct nix_af *nix_af, int lf,
		    const union npa_pool_s *desc, u32 pool_id)
{
	union npa_aq_inst_s *inst;
	union npa_aq_res_s *res;
	union npa_af_aq_status aq_stat;
	struct npa_af *npa = nix_af->npa_af;
	union npa_aura_s *context;
	u64 head;
	ulong start;

	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, pool_id);
	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
	head = aq_stat.s.head_ptr;

	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
	res = (union npa_aq_res_s *)(npa->aq.res.base);

	memset(inst, 0, sizeof(*inst));
	inst->s.cindex = pool_id;
	inst->s.lf = lf;
	inst->s.doneint = 0;
	inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
	inst->s.op = NPA_AQ_INSTOP_E_INIT;
	inst->s.res_addr = npa->aq.res.iova;

	context = (union npa_aura_s *)(npa->aq.res.base +
						CONFIG_SYS_CACHELINE_SIZE);
	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
	memcpy(context, desc, sizeof(union npa_aura_s));
	__iowmb();
	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);

	start = get_timer(0);
	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
	       (get_timer(start) < 1000))
		WATCHDOG_RESET();

	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
		printf("%s: Error: result 0x%x not good\n",
		       __func__, res->s.compcode);
		return -1;
	}

	return 0;
}

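/**
 * Reset an NPA LF and configure its aura context storage
 *
 * @param npa        NPA handle
 * @param lf         NPA logical function number
 * @param aura_base  DMA address of the local aura context memory
 *
 * @return 0
 */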
int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base)
{
	union npa_af_lf_rst lf_rst;
	union npa_af_lfx_auras_cfg auras_cfg;
	struct npa_af *npa_af = npa->npa_af;

	debug("%s(%p, %d, 0x%llx)\n", __func__, npa_af, lf, aura_base);
	lf_rst.u = 0;
	lf_rst.s.exec = 1;
	lf_rst.s.lf = lf;
	npa_af_reg_write(npa_af, NPA_AF_LF_RST(), lf_rst.u);

	do {
		lf_rst.u = npa_af_reg_read(npa_af, NPA_AF_LF_RST());
		WATCHDOG_RESET();
	} while (lf_rst.s.exec);

	/* Set aura size and enable caching of contexts */
	auras_cfg.u = npa_af_reg_read(npa_af, NPA_AF_LFX_AURAS_CFG(lf));
	auras_cfg.s.loc_aura_size = NPA_AURA_SIZE_DEFAULT; /* FIXME: use aura_size */
	auras_cfg.s.caching = 1;
	auras_cfg.s.rmt_aura_size = 0;
	auras_cfg.s.rmt_aura_offset = 0;
	auras_cfg.s.rmt_lf = 0;
	npa_af_reg_write(npa_af, NPA_AF_LFX_AURAS_CFG(lf), auras_cfg.u);
	/* Configure aura HW context base */
	npa_af_reg_write(npa_af, NPA_AF_LFX_LOC_AURAS_BASE(lf),
			 aura_base);

	return 0;
}

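/**
 * Disable all pools and auras of an NPA LF, then reset the LF
 *
 * Each context is disabled with an AQ WRITE instruction that clears
 * the ena bit; the second copy of the context serves as the write mask.
 *
 * @param nix_af      NIX AF handle
 * @param lf          NPA logical function number
 * @param pool_count  Number of pool/aura pairs to disable
 *
 * @return 0 on success, -1 on timeout or bad completion code
 */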
int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count)
{
	int pool_id;
	u32 head;
	union npa_aq_inst_s *inst;
	union npa_aq_res_s *res;
	struct npa_aq_pool_request {
		union npa_aq_res_s	resp ALIGNED;
		union npa_pool_s p0 ALIGNED;
		union npa_pool_s p1 ALIGNED;
	} pool_req ALIGNED;
	struct npa_aq_aura_request {
		union npa_aq_res_s	resp ALIGNED;
		union npa_aura_s a0 ALIGNED;
		union npa_aura_s a1 ALIGNED;
	} aura_req ALIGNED;
	union npa_af_aq_status aq_stat;
	union npa_af_lf_rst lf_rst;
	struct npa_af *npa = nix_af->npa_af;
	ulong start;

	for (pool_id = 0; pool_id < pool_count; pool_id++) {
		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
		head = aq_stat.s.head_ptr;
		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
		res = &pool_req.resp;

		memset(inst, 0, sizeof(*inst));
		inst->s.cindex = pool_id;
		inst->s.lf = lf;
		inst->s.doneint = 0;
		inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
		inst->s.res_addr = (u64)&pool_req.resp;

		memset((void *)&pool_req, 0, sizeof(pool_req));
		pool_req.p0.s.ena = 0;	/* Context */
		pool_req.p1.s.ena = 1;	/* Write mask */
		__iowmb();

		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);

		start = get_timer(0);
		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
		       (get_timer(start) < 1000))
			WATCHDOG_RESET();

		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
			printf("%s: Error: result 0x%x not good for lf %d pool id %d\n",
			       __func__, res->s.compcode, lf, pool_id);
			return -1;
		}
		debug("%s(LF %d, pool id %d) disabled\n", __func__, lf,
		      pool_id);
	}

	for (pool_id = 0; pool_id < pool_count; pool_id++) {
		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
		head = aq_stat.s.head_ptr;
		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
		res = &aura_req.resp;

		memset(inst, 0, sizeof(*inst));
		inst->s.cindex = pool_id;
		inst->s.lf = lf;
		inst->s.doneint = 0;
		inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
		inst->s.res_addr = (u64)&aura_req.resp;

		memset((void *)&aura_req, 0, sizeof(aura_req));
		aura_req.a0.s.ena = 0;	/* Context */
		aura_req.a1.s.ena = 1;	/* Write mask */
		__iowmb();

		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);

		start = get_timer(0);
		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
		       (get_timer(start) < 1000))
			WATCHDOG_RESET();

		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
			printf("%s: Error: result 0x%x not good for lf %d aura id %d\n",
			       __func__, res->s.compcode, lf, pool_id);
			return -1;
		}
		debug("%s(LF %d, aura id %d) disabled\n", __func__, lf,
		      pool_id);
	}

	/* Reset the LF */
	lf_rst.u = 0;
	lf_rst.s.exec = 1;
	lf_rst.s.lf = lf;
	npa_af_reg_write(npa, NPA_AF_LF_RST(), lf_rst.u);

	do {
		lf_rst.u = npa_af_reg_read(npa, NPA_AF_LF_RST());
		WATCHDOG_RESET();
	} while (lf_rst.s.exec);

	return 0;
}

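/**
 * Allocate the NPA admin queue and bring the NPA block out of reset
 *
 * Programs little-endian mode, enables the NDC cache and sets the AQ
 * size and base address.
 *
 * @param npa_af  NPA AF handle
 *
 * @return 0 on success, negative on allocation failure
 */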
int npa_af_setup(struct npa_af *npa_af)
{
	int err;
	union npa_af_gen_cfg npa_cfg;
	union npa_af_ndc_cfg ndc_cfg;
	union npa_af_aq_cfg aq_cfg;
	union npa_af_blk_rst blk_rst;

	err = rvu_aq_alloc(&npa_af->aq, Q_COUNT(AQ_SIZE),
			   sizeof(union npa_aq_inst_s),
			   sizeof(union npa_aq_res_s));
	if (err) {
		printf("%s: Error %d allocating admin queue\n", __func__, err);
		return err;
	}
	debug("%s: NPA admin queue allocated at %p %llx\n", __func__,
	      npa_af->aq.inst.base, npa_af->aq.inst.iova);

	blk_rst.u = 0;
	blk_rst.s.rst = 1;
	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u);

	/* Wait for reset to complete */
	do {
		blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
		WATCHDOG_RESET();
	} while (blk_rst.s.busy);

	/* Set little-endian mode */
	npa_cfg.u = npa_af_reg_read(npa_af, NPA_AF_GEN_CFG());
	npa_cfg.s.af_be = 0;
	npa_af_reg_write(npa_af, NPA_AF_GEN_CFG(), npa_cfg.u);
	/* Enable NDC cache */
	ndc_cfg.u = npa_af_reg_read(npa_af, NPA_AF_NDC_CFG());
	ndc_cfg.s.ndc_bypass = 0;
	npa_af_reg_write(npa_af, NPA_AF_NDC_CFG(), ndc_cfg.u);
	/* Set up queue size */
	aq_cfg.u = npa_af_reg_read(npa_af, NPA_AF_AQ_CFG());
	aq_cfg.s.qsize = AQ_SIZE;
	npa_af_reg_write(npa_af, NPA_AF_AQ_CFG(), aq_cfg.u);
	/* Set up queue base address */
	npa_af_reg_write(npa_af, NPA_AF_AQ_BASE(), npa_af->aq.inst.iova);

	return 0;
}

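/**
 * Reset the NPA block and free its admin queue
 *
 * @param npa_af  NPA AF handle
 *
 * @return 0
 */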
int npa_af_shutdown(struct npa_af *npa_af)
{
	union npa_af_blk_rst blk_rst;

	blk_rst.u = 0;
	blk_rst.s.rst = 1;
	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u);

	/* Wait for reset to complete */
	do {
		blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
		WATCHDOG_RESET();
	} while (blk_rst.s.busy);

	rvu_aq_free(&npa_af->aq);

	debug("%s: npa af reset --\n", __func__);

	return 0;
}

/***************
 * NIX API
 ***************/
/**
 * Setup SMQ -> TL4 -> TL3 -> TL2 -> TL1 -> MAC mapping
 *
 * @param nix     Handle to setup
 *
 * @return 0, or negative on failure
 */
static int nix_af_setup_sq(struct nix *nix)
{
	union nixx_af_tl1x_schedule tl1_sched;
	union nixx_af_tl2x_parent tl2_parent;
	union nixx_af_tl3x_parent tl3_parent;
	union nixx_af_tl3_tl2x_cfg tl3_tl2_cfg;
	union nixx_af_tl3_tl2x_linkx_cfg tl3_tl2_link_cfg;
	union nixx_af_tl4x_parent tl4_parent;
	union nixx_af_tl4x_sdp_link_cfg tl4_sdp_link_cfg;
	union nixx_af_smqx_cfg smq_cfg;
	union nixx_af_mdqx_schedule mdq_sched;
	union nixx_af_mdqx_parent mdq_parent;
	union nixx_af_rx_linkx_cfg link_cfg;
	int tl1_index = nix->lmac->link_num; /* NIX_LINK_E enum */
	int tl2_index = tl1_index;
	int tl3_index = tl2_index;
	int tl4_index = tl3_index;
	int smq_index = tl4_index;
	struct nix_af *nix_af = nix->nix_af;
	u64 offset = 0;

	tl1_sched.u = nix_af_reg_read(nix_af,
				      NIXX_AF_TL1X_SCHEDULE(tl1_index));
	tl1_sched.s.rr_quantum = MAX_MTU;
	nix_af_reg_write(nix_af, NIXX_AF_TL1X_SCHEDULE(tl1_index),
			 tl1_sched.u);

	tl2_parent.u = nix_af_reg_read(nix_af,
				       NIXX_AF_TL2X_PARENT(tl2_index));
	tl2_parent.s.parent = tl1_index;
	nix_af_reg_write(nix_af, NIXX_AF_TL2X_PARENT(tl2_index),
			 tl2_parent.u);

	tl3_parent.u = nix_af_reg_read(nix_af,
				       NIXX_AF_TL3X_PARENT(tl3_index));
	tl3_parent.s.parent = tl2_index;
	nix_af_reg_write(nix_af, NIXX_AF_TL3X_PARENT(tl3_index),
			 tl3_parent.u);
	tl3_tl2_cfg.u = nix_af_reg_read(nix_af,
					NIXX_AF_TL3_TL2X_CFG(tl3_index));
	tl3_tl2_cfg.s.express = 0;
	nix_af_reg_write(nix_af, NIXX_AF_TL3_TL2X_CFG(tl3_index),
			 tl3_tl2_cfg.u);

	offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index,
					    nix->lmac->link_num);
	tl3_tl2_link_cfg.u = nix_af_reg_read(nix_af, offset);
	tl3_tl2_link_cfg.s.bp_ena = 1;
	tl3_tl2_link_cfg.s.ena = 1;
	tl3_tl2_link_cfg.s.relchan = 0;
	nix_af_reg_write(nix_af, offset, tl3_tl2_link_cfg.u);

	tl4_parent.u = nix_af_reg_read(nix_af,
				       NIXX_AF_TL4X_PARENT(tl4_index));
	tl4_parent.s.parent = tl3_index;
	nix_af_reg_write(nix_af, NIXX_AF_TL4X_PARENT(tl4_index),
			 tl4_parent.u);

	offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index);
	tl4_sdp_link_cfg.u = nix_af_reg_read(nix_af, offset);
	tl4_sdp_link_cfg.s.bp_ena = 0;
	tl4_sdp_link_cfg.s.ena = 0;
	tl4_sdp_link_cfg.s.relchan = 0;
	nix_af_reg_write(nix_af, offset, tl4_sdp_link_cfg.u);

	smq_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_SMQX_CFG(smq_index));
	smq_cfg.s.express = 0;
	smq_cfg.s.lf = nix->lf;
	smq_cfg.s.desc_shp_ctl_dis = 1;
	smq_cfg.s.maxlen = MAX_MTU;
	smq_cfg.s.minlen = NIX_MIN_HW_MTU;
	nix_af_reg_write(nix_af, NIXX_AF_SMQX_CFG(smq_index), smq_cfg.u);

	mdq_sched.u = nix_af_reg_read(nix_af,
				      NIXX_AF_MDQX_SCHEDULE(smq_index));
	mdq_sched.s.rr_quantum = MAX_MTU;
	nix_af_reg_write(nix_af, NIXX_AF_MDQX_SCHEDULE(smq_index),
			 mdq_sched.u);
	mdq_parent.u = nix_af_reg_read(nix_af,
				       NIXX_AF_MDQX_PARENT(smq_index));
	mdq_parent.s.parent = tl4_index;
	nix_af_reg_write(nix_af, NIXX_AF_MDQX_PARENT(smq_index),
			 mdq_parent.u);

	link_cfg.u = 0;
	link_cfg.s.maxlen = NIX_MAX_HW_MTU;
	link_cfg.s.minlen = NIX_MIN_HW_MTU;
	nix_af_reg_write(nix->nix_af,
			 NIXX_AF_RX_LINKX_CFG(nix->lmac->link_num),
			 link_cfg.u);

	return 0;
}

/**
 * Issue a command to the NIX AF Admin Queue
 *
 * @param nix_af NIX AF handle
 * @param lf     Logical function number for command
 * @param op     Operation
 * @param ctype  Context type
 * @param cindex Context index
 * @param resp   Result pointer
 *
 * @return	0 for success, -EBUSY on failure
 */
static int nix_aq_issue_command(struct nix_af *nix_af,
				int lf,
				int op,
				int ctype,
				int cindex, union nix_aq_res_s *resp)
{
	union nixx_af_aq_status aq_status;
	union nix_aq_inst_s *aq_inst;
	union nix_aq_res_s *result = resp;
	ulong start;

	debug("%s(%p, 0x%x, 0x%x, 0x%x, 0x%x, %p)\n", __func__, nix_af, lf,
	      op, ctype, cindex, resp);
	aq_status.u = nix_af_reg_read(nix_af, NIXX_AF_AQ_STATUS());
	aq_inst = (union nix_aq_inst_s *)(nix_af->aq.inst.base) +
						aq_status.s.head_ptr;
	aq_inst->u[0] = 0;
	aq_inst->u[1] = 0;
	aq_inst->s.op = op;
	aq_inst->s.ctype = ctype;
	aq_inst->s.lf = lf;
	aq_inst->s.cindex = cindex;
	aq_inst->s.doneint = 0;
	aq_inst->s.res_addr = (u64)resp;
	debug("%s: inst@%p: 0x%llx 0x%llx\n", __func__, aq_inst,
	      aq_inst->u[0], aq_inst->u[1]);
	__iowmb();

	/* Ring doorbell and wait for result */
	nix_af_reg_write(nix_af, NIXX_AF_AQ_DOOR(), 1);

	start = get_timer(0);
	/* Wait for completion */
	do {
		WATCHDOG_RESET();
		dsb();
	} while (result->s.compcode == 0 && get_timer(start) < 2);

	if (result->s.compcode != NIX_AQ_COMP_E_GOOD) {
		printf("NIX: AQ command failed or timed out with code %d after %ld ms\n",
		       result->s.compcode, get_timer(start));
		return -EBUSY;
	}
	return 0;
}

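/**
 * Initialize a receive queue context through the NIX admin queue
 *
 * The RQ is pointed at the RX completion queue and at the NPA RX pool
 * for both small and large packet buffers; no WQE aura is used.
 *
 * @param nix_af  NIX AF handle
 * @param lf      NIX logical function number
 *
 * @return 0 on success, negative on failure
 */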
static int nix_attach_receive_queue(struct nix_af *nix_af, int lf)
{
	struct nix_aq_rq_request rq_req ALIGNED;
	int err;

	debug("%s(%p, %d)\n", __func__, nix_af, lf);

	memset(&rq_req, 0, sizeof(struct nix_aq_rq_request));

	rq_req.rq.s.ena = 1;
	rq_req.rq.s.spb_ena = 1;
	rq_req.rq.s.ipsech_ena = 0;
	rq_req.rq.s.ena_wqwd = 0;
	rq_req.rq.s.cq = NIX_CQ_RX;
	rq_req.rq.s.substream = 0;	/* FIXME: Substream IDs? */
	rq_req.rq.s.wqe_aura = -1;	/* No WQE aura */
	rq_req.rq.s.spb_aura = NPA_POOL_RX;
	rq_req.rq.s.lpb_aura = NPA_POOL_RX;
	/* U-Boot doesn't use WQE group for anything */
	rq_req.rq.s.pb_caching = 1;
	rq_req.rq.s.xqe_drop_ena = 0;	/* Disable RED dropping */
	rq_req.rq.s.spb_drop_ena = 0;
	rq_req.rq.s.lpb_drop_ena = 0;
	rq_req.rq.s.spb_sizem1 = (MAX_MTU / (3 * 8)) - 1; /* 512 bytes */
	rq_req.rq.s.lpb_sizem1 = (MAX_MTU / 8) - 1;
	rq_req.rq.s.first_skip = 0;
	rq_req.rq.s.later_skip = 0;
	rq_req.rq.s.xqe_imm_copy = 0;
	rq_req.rq.s.xqe_hdr_split = 0;
	rq_req.rq.s.xqe_drop = 0;
	rq_req.rq.s.xqe_pass = 0;
	rq_req.rq.s.wqe_pool_drop = 0;	/* No WQE pool */
	rq_req.rq.s.wqe_pool_pass = 0;	/* No WQE pool */
	rq_req.rq.s.spb_aura_drop = 255;
	rq_req.rq.s.spb_aura_pass = 255;
	rq_req.rq.s.spb_pool_drop = 0;
	rq_req.rq.s.spb_pool_pass = 0;
	rq_req.rq.s.lpb_aura_drop = 255;
	rq_req.rq.s.lpb_aura_pass = 255;
	rq_req.rq.s.lpb_pool_drop = 0;
	rq_req.rq.s.lpb_pool_pass = 0;
	rq_req.rq.s.qint_idx = 0;

	err = nix_aq_issue_command(nix_af, lf,
				   NIX_AQ_INSTOP_E_INIT,
				   NIX_AQ_CTYPE_E_RQ,
				   0, &rq_req.resp);
	if (err) {
		printf("%s: Error requesting receive queue\n", __func__);
		return err;
	}

	return 0;
}

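/**
 * Initialize a send queue context through the NIX admin queue
 *
 * The transmit scheduling hierarchy is configured first via
 * nix_af_setup_sq(), then the SQ context is initialized with its
 * completion queue, SQB aura and default channel.
 *
 * @param nix  NIX handle
 *
 * @return 0 on success, negative on failure
 */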
static int nix_attach_send_queue(struct nix *nix)
{
	struct nix_af *nix_af = nix->nix_af;
	struct nix_aq_sq_request sq_req ALIGNED;
	int err;

	debug("%s(%p)\n", __func__, nix_af);
	err = nix_af_setup_sq(nix);
	if (err) {
		printf("%s: Error setting up send queue scheduling\n",
		       __func__);
		return err;
	}

	memset(&sq_req, 0, sizeof(sq_req));

	sq_req.sq.s.ena = 1;
	sq_req.sq.s.cq_ena = 1;
	sq_req.sq.s.max_sqe_size = NIX_MAXSQESZ_E_W16;
	sq_req.sq.s.substream = 0;	/* FIXME: Substream IDs? */
	sq_req.sq.s.sdp_mcast = 0;
	sq_req.sq.s.cq = NIX_CQ_TX;
	sq_req.sq.s.cq_limit = 0;
	sq_req.sq.s.smq = nix->lmac->link_num;	/* Scheduling index */
	sq_req.sq.s.sso_ena = 0;
	sq_req.sq.s.smq_rr_quantum = MAX_MTU / 4;
	sq_req.sq.s.default_chan = nix->lmac->chan_num;
	sq_req.sq.s.sqe_stype = NIX_STYPE_E_STP;
	sq_req.sq.s.qint_idx = 0;
	sq_req.sq.s.sqb_aura = NPA_POOL_SQB;

	err = nix_aq_issue_command(nix_af, nix->lf,
				   NIX_AQ_INSTOP_E_INIT,
				   NIX_AQ_CTYPE_E_SQ,
				   0, &sq_req.resp);
	if (err) {
		printf("%s: Error requesting send queue\n", __func__);
		return err;
	}

	return 0;
}

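/**
 * Initialize a completion queue context through the NIX admin queue
 *
 * @param nix     NIX handle
 * @param cq_idx  Index of the completion queue to initialize
 *
 * @return 0 on success, negative on failure
 */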
static int nix_attach_completion_queue(struct nix *nix, int cq_idx)
{
	struct nix_af *nix_af = nix->nix_af;
	struct nix_aq_cq_request cq_req ALIGNED;
	int err;

	debug("%s(%p)\n", __func__, nix_af);
	memset(&cq_req, 0, sizeof(cq_req));
	cq_req.cq.s.ena = 1;
	cq_req.cq.s.bpid = nix->lmac->pknd;
	cq_req.cq.s.substream = 0;	/* FIXME: Substream IDs? */
	cq_req.cq.s.drop_ena = 0;
	cq_req.cq.s.caching = 1;
	cq_req.cq.s.qsize = CQS_QSIZE;
	cq_req.cq.s.drop = 255 * 7 / 8;
	cq_req.cq.s.qint_idx = 0;
	cq_req.cq.s.cint_idx = 0;
	cq_req.cq.s.base = nix->cq[cq_idx].iova;
	debug("%s: CQ(%d) base %p\n", __func__, cq_idx,
	      nix->cq[cq_idx].base);

	err = nix_aq_issue_command(nix_af, nix->lf,
				   NIX_AQ_INSTOP_E_INIT,
				   NIX_AQ_CTYPE_E_CQ,
				   cq_idx, &cq_req.resp);
	if (err) {
		printf("%s: Error requesting completion queue\n", __func__);
		return err;
	}
	debug("%s: CQ(%d) allocated, base %p\n", __func__, cq_idx,
	      nix->cq[cq_idx].base);

	return 0;
}

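/**
 * Reset a NIX LF and program its queue context storage
 *
 * Configures the RQ/SQ/CQ/RSS/CINT/QINT context bases and caching,
 * enables LMTST, then initializes every completion, receive and send
 * queue of the LF.
 *
 * @param nix  NIX handle
 *
 * @return 0 on success, negative on failure
 */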
int nix_lf_admin_setup(struct nix *nix)
{
	union nixx_af_lfx_rqs_cfg rqs_cfg;
	union nixx_af_lfx_sqs_cfg sqs_cfg;
	union nixx_af_lfx_cqs_cfg cqs_cfg;
	union nixx_af_lfx_rss_cfg rss_cfg;
	union nixx_af_lfx_cints_cfg cints_cfg;
	union nixx_af_lfx_qints_cfg qints_cfg;
	union nixx_af_lfx_rss_grpx rss_grp;
	union nixx_af_lfx_tx_cfg2 tx_cfg2;
	union nixx_af_lfx_cfg lfx_cfg;
	union nixx_af_lf_rst lf_rst;
	u32 index;
	struct nix_af *nix_af = nix->nix_af;
	int err;

	debug("%s(%p, %d, %d)\n", __func__, nix_af, nix->lf, nix->pf);

	/* Reset the LF */
	lf_rst.u = 0;
	lf_rst.s.lf = nix->lf;
	lf_rst.s.exec = 1;
	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);

	do {
		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
		WATCHDOG_RESET();
	} while (lf_rst.s.exec);

	/* Config NIX RQ HW context and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_BASE(nix->lf),
			 (u64)nix->rq_ctx_base);
	/* Set caching and queue count in HW */
	rqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf));
	rqs_cfg.s.caching = 1;
	rqs_cfg.s.max_queuesm1 = nix->rq_cnt - 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf), rqs_cfg.u);

	/* Config NIX SQ HW context and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_BASE(nix->lf),
			 (u64)nix->sq_ctx_base);
	sqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf));
	sqs_cfg.s.caching = 1;
	sqs_cfg.s.max_queuesm1 = nix->sq_cnt - 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf), sqs_cfg.u);

	/* Config NIX CQ HW context and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_BASE(nix->lf),
			 (u64)nix->cq_ctx_base);
	cqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf));
	cqs_cfg.s.caching = 1;
	cqs_cfg.s.max_queuesm1 = nix->cq_cnt - 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf), cqs_cfg.u);

	/* Config NIX RSS HW context and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_BASE(nix->lf),
			 (u64)nix->rss_base);
	rss_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf));
	rss_cfg.s.ena = 1;
	rss_cfg.s.size = ilog2(nix->rss_sz) / 256;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf), rss_cfg.u);

	for (index = 0; index < nix->rss_grps; index++) {
		rss_grp.u = 0;
		rss_grp.s.sizem1 = 0x7;
		rss_grp.s.offset = nix->rss_sz * index;
		nix_af_reg_write(nix_af,
				 NIXX_AF_LFX_RSS_GRPX(nix->lf, index),
				 rss_grp.u);
	}

	/* Config CQints HW contexts and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_BASE(nix->lf),
			 (u64)nix->cint_base);
	cints_cfg.u = nix_af_reg_read(nix_af,
				      NIXX_AF_LFX_CINTS_CFG(nix->lf));
	cints_cfg.s.caching = 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_CFG(nix->lf),
			 cints_cfg.u);

	/* Config Qints HW context and base */
	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_BASE(nix->lf),
			 (u64)nix->qint_base);
	qints_cfg.u = nix_af_reg_read(nix_af,
				      NIXX_AF_LFX_QINTS_CFG(nix->lf));
	qints_cfg.s.caching = 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_CFG(nix->lf),
			 qints_cfg.u);

	/* Enable LMTST for this NIX LF */
	tx_cfg2.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf));
	tx_cfg2.s.lmt_ena = 1;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf), tx_cfg2.u);

	/* Use 16-word XQEs, write the NPA pf_func number only */
	lfx_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CFG(nix->lf));
	lfx_cfg.s.xqe_size = NIX_XQESZ_E_W16;
	lfx_cfg.s.npa_pf_func = nix->pf_func;
	nix_af_reg_write(nix_af, NIXX_AF_LFX_CFG(nix->lf), lfx_cfg.u);

	nix_af_reg_write(nix_af, NIXX_AF_LFX_RX_CFG(nix->lf), 0);

	for (index = 0; index < nix->cq_cnt; index++) {
		err = nix_attach_completion_queue(nix, index);
		if (err) {
			printf("%s: Error attaching completion queue %d\n",
			       __func__, index);
			return err;
		}
	}

	for (index = 0; index < nix->rq_cnt; index++) {
		err = nix_attach_receive_queue(nix_af, nix->lf);
		if (err) {
			printf("%s: Error attaching receive queue %d\n",
			       __func__, index);
			return err;
		}
	}

	for (index = 0; index < nix->sq_cnt; index++) {
		err = nix_attach_send_queue(nix);
		if (err) {
			printf("%s: Error attaching send queue %d\n",
			       __func__, index);
			return err;
		}
	}

	return 0;
}

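/**
 * Disable all queues of a NIX LF, then reset the LF
 *
 * Waits for in-flight packets to drain via NIXX_AF_RX_SW_SYNC, then
 * disables each RQ, SQ and CQ context with a masked AQ WRITE before
 * resetting the LF.
 *
 * @param nix_af    NIX AF handle
 * @param lf        NIX logical function number
 * @param cq_count  Number of completion queues to disable
 * @param rq_count  Number of receive queues to disable
 * @param sq_count  Number of send queues to disable
 *
 * @return 0 on success, negative on failure
 */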
int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
			  u32 cq_count, u32 rq_count, u32 sq_count)
{
	union nixx_af_rx_sw_sync sw_sync;
	union nixx_af_lf_rst lf_rst;
	int index, err;

	/* Flush in-flight packets before disabling queues */
	sw_sync.u = 0;
	sw_sync.s.ena = 1;
	nix_af_reg_write(nix_af, NIXX_AF_RX_SW_SYNC(), sw_sync.u);

	do {
		sw_sync.u = nix_af_reg_read(nix_af, NIXX_AF_RX_SW_SYNC());
		WATCHDOG_RESET();
	} while (sw_sync.s.ena);

	for (index = 0; index < rq_count; index++) {
		memset((void *)&rq_dis, 0, sizeof(rq_dis));
		rq_dis.rq.s.ena = 0;	/* Context */
		rq_dis.mrq.s.ena = 1;	/* Mask */
		__iowmb();

		err = nix_aq_issue_command(nix_af, lf,
					   NIX_AQ_INSTOP_E_WRITE,
					   NIX_AQ_CTYPE_E_RQ,
					   index, &rq_dis.resp);
		if (err) {
			printf("%s: Error disabling LF %d RQ(%d)\n",
			       __func__, lf, index);
			return err;
		}
		debug("%s: LF %d RQ(%d) disabled\n", __func__, lf, index);
	}

	for (index = 0; index < sq_count; index++) {
		memset((void *)&sq_dis, 0, sizeof(sq_dis));
		sq_dis.sq.s.ena = 0;	/* Context */
		sq_dis.msq.s.ena = 1;	/* Mask */
		__iowmb();

		err = nix_aq_issue_command(nix_af, lf,
					   NIX_AQ_INSTOP_E_WRITE,
					   NIX_AQ_CTYPE_E_SQ,
					   index, &sq_dis.resp);
		if (err) {
			printf("%s: Error disabling LF %d SQ(%d)\n",
			       __func__, lf, index);
			return err;
		}
		debug("%s: LF %d SQ(%d) disabled\n", __func__, lf, index);
	}

	for (index = 0; index < cq_count; index++) {
		memset((void *)&cq_dis, 0, sizeof(cq_dis));
		cq_dis.cq.s.ena = 0;	/* Context */
		cq_dis.mcq.s.ena = 1;	/* Mask */
		__iowmb();

		err = nix_aq_issue_command(nix_af, lf,
					   NIX_AQ_INSTOP_E_WRITE,
					   NIX_AQ_CTYPE_E_CQ,
					   index, &cq_dis.resp);
		if (err) {
			printf("%s: Error disabling LF %d CQ(%d)\n",
			       __func__, lf, index);
			return err;
		}
		debug("%s: LF %d CQ(%d) disabled\n", __func__, lf, index);
	}

	/* Reset the LF */
	lf_rst.u = 0;
	lf_rst.s.lf = lf;
	lf_rst.s.exec = 1;
	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);

	do {
		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
		WATCHDOG_RESET();
	} while (lf_rst.s.exec);

	return 0;
}

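/**
 * Configure NPC parsing and classification for this port
 *
 * Marks the port kind as parse-done, programs an MCAM entry that
 * matches the LMAC channel and steers hits to this pf_func as unicast,
 * and sets the miss actions (drop on RX, default unicast on TX).
 *
 * @param nix  NIX handle
 *
 * @return 0
 */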
int npc_lf_admin_setup(struct nix *nix)
{
	union npc_af_const af_const;
	union npc_af_pkindx_action0 action0;
	union npc_af_pkindx_action1 action1;
	union npc_af_intfx_kex_cfg kex_cfg;
	union npc_af_intfx_miss_stat_act intfx_stat_act;
	union npc_af_mcamex_bankx_camx_intf camx_intf;
	union npc_af_mcamex_bankx_camx_w0 camx_w0;
	union npc_af_mcamex_bankx_cfg bankx_cfg;
	union npc_af_mcamex_bankx_stat_act mcamex_stat_act;

	union nix_rx_action_s rx_action;
	union nix_tx_action_s tx_action;

	struct nix_af *nix_af = nix->nix_af;
	u32 kpus;
	int pkind = nix->lmac->link_num;
	int index;
	u64 offset;

	debug("%s(%p, pkind 0x%x)\n", __func__, nix_af, pkind);
	af_const.u = npc_af_reg_read(nix_af, NPC_AF_CONST());
	kpus = af_const.s.kpus;

	action0.u = 0;
	action0.s.parse_done = 1;
	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION0(pkind), action0.u);

	action1.u = 0;
	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION1(pkind), action1.u);

	kex_cfg.u = 0;
	kex_cfg.s.keyw = NPC_MCAMKEYW_E_X1;
	kex_cfg.s.parse_nibble_ena = 0x7;
	npc_af_reg_write(nix_af,
			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_RX(0)),
			 kex_cfg.u);

	/* HW Issue */
	kex_cfg.u = 0;
	kex_cfg.s.parse_nibble_ena = 0x7;
	npc_af_reg_write(nix_af,
			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_TX(0)),
			 kex_cfg.u);

	camx_intf.u = 0;
	camx_intf.s.intf = ~NPC_INTF_E_NIXX_RX(0);
	npc_af_reg_write(nix_af,
			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 0),
			 camx_intf.u);

	camx_intf.u = 0;
	camx_intf.s.intf = NPC_INTF_E_NIXX_RX(0);
	npc_af_reg_write(nix_af,
			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 1),
			 camx_intf.u);

	camx_w0.u = 0;
	camx_w0.s.md = ~(nix->lmac->chan_num) & (~((~0x0ull) << 12));
	debug("NPC LF ADMIN camx_w0.u %llx\n", camx_w0.u);
	npc_af_reg_write(nix_af,
			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 0),
			 camx_w0.u);

	camx_w0.u = 0;
	camx_w0.s.md = nix->lmac->chan_num;
	npc_af_reg_write(nix_af,
			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 1),
			 camx_w0.u);

	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 0),
			 0);

	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 1),
			 0);

	/* Enable stats for NPC INTF RX */
	mcamex_stat_act.u = 0;
	mcamex_stat_act.s.ena = 1;
	mcamex_stat_act.s.stat_sel = pkind;
	npc_af_reg_write(nix_af,
			 NPC_AF_MCAMEX_BANKX_STAT_ACT(pkind, 0),
			 mcamex_stat_act.u);
	intfx_stat_act.u = 0;
	intfx_stat_act.s.ena = 1;
	intfx_stat_act.s.stat_sel = 16;
	offset = NPC_AF_INTFX_MISS_STAT_ACT(NPC_INTF_E_NIXX_RX(0));
	npc_af_reg_write(nix_af, offset, intfx_stat_act.u);
	rx_action.u = 0;
	rx_action.s.pf_func = nix->pf_func;
	rx_action.s.op = NIX_RX_ACTIONOP_E_UCAST;
	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_ACTION(pkind, 0),
			 rx_action.u);

	for (index = 0; index < kpus; index++)
		npc_af_reg_write(nix_af, NPC_AF_KPUX_CFG(index), 0);

	rx_action.u = 0;
	rx_action.s.pf_func = nix->pf_func;
	rx_action.s.op = NIX_RX_ACTIONOP_E_DROP;
	npc_af_reg_write(nix_af,
			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_RX(0)),
			 rx_action.u);
	bankx_cfg.u = 0;
	bankx_cfg.s.ena = 1;
	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CFG(pkind, 0),
			 bankx_cfg.u);

	tx_action.u = 0;
	tx_action.s.op = NIX_TX_ACTIONOP_E_UCAST_DEFAULT;
	npc_af_reg_write(nix_af,
			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_TX(0)),
			 tx_action.u);

#ifdef DEBUG
	/* Enable debug capture on RX intf */
	npc_af_reg_write(nix_af, NPC_AF_DBG_CTL(), 0x4);
#endif

	return 0;
}

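/**
 * Reset the NPC block
 *
 * @param nix_af  NIX AF handle
 *
 * @return 0
 */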
int npc_af_shutdown(struct nix_af *nix_af)
{
	union npc_af_blk_rst blk_rst;

	blk_rst.u = 0;
	blk_rst.s.rst = 1;
	npc_af_reg_write(nix_af, NPC_AF_BLK_RST(), blk_rst.u);

	/* Wait for reset to complete */
	do {
		blk_rst.u = npc_af_reg_read(nix_af, NPC_AF_BLK_RST());
		WATCHDOG_RESET();
	} while (blk_rst.s.busy);

	debug("%s: npc af reset --\n", __func__);

	return 0;
}

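/**
 * Allocate the NIX admin queue and bring the NIX block out of reset
 *
 * Performs X2P calibration, enables the NDC caches, programs the AQ
 * size and base, and caches the hardware context sizes and interrupt
 * counts reported by the CONST registers.
 *
 * @param nix_af  NIX AF handle
 *
 * @return 0 on success, negative on failure
 */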
int nix_af_setup(struct nix_af *nix_af)
{
	int err;
	union nixx_af_const2 af_const2;
	union nixx_af_const3 af_const3;
	union nixx_af_sq_const sq_const;
	union nixx_af_cfg af_cfg;
	union nixx_af_status af_status;
	union nixx_af_ndc_cfg ndc_cfg;
	union nixx_af_aq_cfg aq_cfg;
	union nixx_af_blk_rst blk_rst;

	debug("%s(%p)\n", __func__, nix_af);
	err = rvu_aq_alloc(&nix_af->aq, Q_COUNT(AQ_SIZE),
			   sizeof(union nix_aq_inst_s),
			   sizeof(union nix_aq_res_s));
	if (err) {
		printf("%s: Error allocating nix admin queue\n", __func__);
		return err;
	}

	blk_rst.u = 0;
	blk_rst.s.rst = 1;
	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u);

	/* Wait for reset to complete */
	do {
		blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
		WATCHDOG_RESET();
	} while (blk_rst.s.busy);

	/* Put in LE mode */
	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
	if (af_cfg.s.force_cond_clk_en || af_cfg.s.calibrate_x2p ||
	    af_cfg.s.force_intf_clk_en) {
		printf("%s: Error: Invalid NIX_AF_CFG value 0x%llx\n",
		       __func__, af_cfg.u);
		return -1;
	}
	af_cfg.s.af_be = 0;
	af_cfg.u |= 0x5E;	/* HW Issue */
	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);

	/* Perform calibration */
	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
	af_cfg.s.calibrate_x2p = 1;
	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);

	/* Wait for calibration to complete */
	do {
		af_status.u = nix_af_reg_read(nix_af, NIXX_AF_STATUS());
		WATCHDOG_RESET();
	} while (af_status.s.calibrate_done == 0);

	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
	af_cfg.s.calibrate_x2p = 0;
	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);

	/* Enable NDC cache */
	ndc_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_NDC_CFG());
	ndc_cfg.s.ndc_ign_pois = 0;
	ndc_cfg.s.byp_sq = 0;
	ndc_cfg.s.byp_sqb = 0;
	ndc_cfg.s.byp_cqs = 0;
	ndc_cfg.s.byp_cints = 0;
	ndc_cfg.s.byp_dyno = 0;
	ndc_cfg.s.byp_mce = 0;
	ndc_cfg.s.byp_rqc = 0;
	ndc_cfg.s.byp_rsse = 0;
	ndc_cfg.s.byp_mc_data = 0;
	ndc_cfg.s.byp_mc_wqe = 0;
	ndc_cfg.s.byp_mr_data = 0;
	ndc_cfg.s.byp_mr_wqe = 0;
	ndc_cfg.s.byp_qints = 0;
	nix_af_reg_write(nix_af, NIXX_AF_NDC_CFG(), ndc_cfg.u);

	/* Set up queue size */
	aq_cfg.u = 0;
	aq_cfg.s.qsize = AQ_SIZE;
	nix_af_reg_write(nix_af, NIXX_AF_AQ_CFG(), aq_cfg.u);

	/* Set up queue base address */
	nix_af_reg_write(nix_af, NIXX_AF_AQ_BASE(), nix_af->aq.inst.iova);

	/* Cache hardware constants for later context allocation */
	af_const3.u = nix_af_reg_read(nix_af, NIXX_AF_CONST3());
	af_const2.u = nix_af_reg_read(nix_af, NIXX_AF_CONST2());
	sq_const.u = nix_af_reg_read(nix_af, NIXX_AF_SQ_CONST());
	nix_af->rq_ctx_sz = 1ULL << af_const3.s.rq_ctx_log2bytes;
	nix_af->sq_ctx_sz = 1ULL << af_const3.s.sq_ctx_log2bytes;
	nix_af->cq_ctx_sz = 1ULL << af_const3.s.cq_ctx_log2bytes;
	nix_af->rsse_ctx_sz = 1ULL << af_const3.s.rsse_log2bytes;
	nix_af->qints = af_const2.s.qints;
	nix_af->cints = af_const2.s.cints;
	nix_af->cint_ctx_sz = 1ULL << af_const3.s.cint_log2bytes;
	nix_af->qint_ctx_sz = 1ULL << af_const3.s.qint_log2bytes;
	nix_af->sqb_size = sq_const.s.sqb_size;

	return 0;
}

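/**
 * Reset the NIX block and free its admin queue
 *
 * @param nix_af  NIX AF handle
 *
 * @return 0
 */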
int nix_af_shutdown(struct nix_af *nix_af)
{
	union nixx_af_blk_rst blk_rst;

	blk_rst.u = 0;
	blk_rst.s.rst = 1;
	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u);

	/* Wait for reset to complete */
	do {
		blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
		WATCHDOG_RESET();
	} while (blk_rst.s.busy);

	rvu_aq_free(&nix_af->aq);

	debug("%s: nix af reset --\n", __func__);

	return 0;
}