linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Marvell OcteonTx2 RVU Ethernet driver
   3 *
   4 * Copyright (C) 2020 Marvell International Ltd.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10
  11#include <linux/interrupt.h>
  12#include <linux/pci.h>
  13#include <net/tso.h>
  14
  15#include "otx2_reg.h"
  16#include "otx2_common.h"
  17#include "otx2_struct.h"
  18
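     /* Note on the per-queue NIX_LF_*_OP_* statistic reads below: the queue
      * index is carried in the upper 32 bits of the operand passed to
      * otx2_atomic64_add(), and the value returned by the atomic add is the
      * selected queue's counter (a description inferred from the callers
      * below, not from hardware documentation).
      */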
  19static void otx2_nix_rq_op_stats(struct queue_stats *stats,
  20                                 struct otx2_nic *pfvf, int qidx)
  21{
  22        u64 incr = (u64)qidx << 32;
  23        u64 *ptr;
  24
  25        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
  26        stats->bytes = otx2_atomic64_add(incr, ptr);
  27
  28        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
  29        stats->pkts = otx2_atomic64_add(incr, ptr);
  30}
  31
  32static void otx2_nix_sq_op_stats(struct queue_stats *stats,
  33                                 struct otx2_nic *pfvf, int qidx)
  34{
  35        u64 incr = (u64)qidx << 32;
  36        u64 *ptr;
  37
  38        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
  39        stats->bytes = otx2_atomic64_add(incr, ptr);
  40
  41        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
  42        stats->pkts = otx2_atomic64_add(incr, ptr);
  43}
  44
  45void otx2_update_lmac_stats(struct otx2_nic *pfvf)
  46{
  47        struct msg_req *req;
  48
  49        if (!netif_running(pfvf->netdev))
  50                return;
  51
  52        mutex_lock(&pfvf->mbox.lock);
  53        req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
  54        if (!req) {
  55                mutex_unlock(&pfvf->mbox.lock);
  56                return;
  57        }
  58
  59        otx2_sync_mbox_msg(&pfvf->mbox);
  60        mutex_unlock(&pfvf->mbox.lock);
  61}
  62
  63int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
  64{
  65        struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
  66
  67        if (!pfvf->qset.rq)
  68                return 0;
  69
  70        otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
  71        return 1;
  72}
  73
  74int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
  75{
  76        struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
  77
  78        if (!pfvf->qset.sq)
  79                return 0;
  80
  81        otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
  82        return 1;
  83}
  84
  85void otx2_get_dev_stats(struct otx2_nic *pfvf)
  86{
  87        struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
  88
  89#define OTX2_GET_RX_STATS(reg) \
  90         otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
  91#define OTX2_GET_TX_STATS(reg) \
  92         otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
  93
  94        dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
  95        dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
  96        dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
  97        dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
  98        dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
  99        dev_stats->rx_frames = dev_stats->rx_bcast_frames +
 100                               dev_stats->rx_mcast_frames +
 101                               dev_stats->rx_ucast_frames;
 102
 103        dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
 104        dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
 105        dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
 106        dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
 107        dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
 108        dev_stats->tx_frames = dev_stats->tx_bcast_frames +
 109                               dev_stats->tx_mcast_frames +
 110                               dev_stats->tx_ucast_frames;
 111}
 112
 113void otx2_get_stats64(struct net_device *netdev,
 114                      struct rtnl_link_stats64 *stats)
 115{
 116        struct otx2_nic *pfvf = netdev_priv(netdev);
 117        struct otx2_dev_stats *dev_stats;
 118
 119        otx2_get_dev_stats(pfvf);
 120
 121        dev_stats = &pfvf->hw.dev_stats;
 122        stats->rx_bytes = dev_stats->rx_bytes;
 123        stats->rx_packets = dev_stats->rx_frames;
 124        stats->rx_dropped = dev_stats->rx_drops;
 125        stats->multicast = dev_stats->rx_mcast_frames;
 126
 127        stats->tx_bytes = dev_stats->tx_bytes;
 128        stats->tx_packets = dev_stats->tx_frames;
 129        stats->tx_dropped = dev_stats->tx_drops;
 130}
 131EXPORT_SYMBOL(otx2_get_stats64);
 132
 133/* Sync MAC address with RVU AF */
 134static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
 135{
 136        struct nix_set_mac_addr *req;
 137        int err;
 138
 139        mutex_lock(&pfvf->mbox.lock);
 140        req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
 141        if (!req) {
 142                mutex_unlock(&pfvf->mbox.lock);
 143                return -ENOMEM;
 144        }
 145
 146        ether_addr_copy(req->mac_addr, mac);
 147
 148        err = otx2_sync_mbox_msg(&pfvf->mbox);
 149        mutex_unlock(&pfvf->mbox.lock);
 150        return err;
 151}
 152
 153static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
 154                                struct net_device *netdev)
 155{
 156        struct nix_get_mac_addr_rsp *rsp;
 157        struct mbox_msghdr *msghdr;
 158        struct msg_req *req;
 159        int err;
 160
 161        mutex_lock(&pfvf->mbox.lock);
 162        req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
 163        if (!req) {
 164                mutex_unlock(&pfvf->mbox.lock);
 165                return -ENOMEM;
 166        }
 167
 168        err = otx2_sync_mbox_msg(&pfvf->mbox);
 169        if (err) {
 170                mutex_unlock(&pfvf->mbox.lock);
 171                return err;
 172        }
 173
 174        msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
 175        if (IS_ERR(msghdr)) {
 176                mutex_unlock(&pfvf->mbox.lock);
 177                return PTR_ERR(msghdr);
 178        }
 179        rsp = (struct nix_get_mac_addr_rsp *)msghdr;
 180        ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
 181        mutex_unlock(&pfvf->mbox.lock);
 182
 183        return 0;
 184}
 185
 186int otx2_set_mac_address(struct net_device *netdev, void *p)
 187{
 188        struct otx2_nic *pfvf = netdev_priv(netdev);
 189        struct sockaddr *addr = p;
 190
 191        if (!is_valid_ether_addr(addr->sa_data))
 192                return -EADDRNOTAVAIL;
 193
 194        if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
 195                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 196        else
 197                return -EPERM;
 198
 199        return 0;
 200}
 201EXPORT_SYMBOL(otx2_set_mac_address);
 202
 203int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
 204{
 205        struct nix_frs_cfg *req;
 206        int err;
 207
 208        mutex_lock(&pfvf->mbox.lock);
 209        req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
 210        if (!req) {
 211                mutex_unlock(&pfvf->mbox.lock);
 212                return -ENOMEM;
 213        }
 214
 215        pfvf->max_frs = mtu +  OTX2_ETH_HLEN;
 216        req->maxlen = pfvf->max_frs;
 217
 218        err = otx2_sync_mbox_msg(&pfvf->mbox);
 219        mutex_unlock(&pfvf->mbox.lock);
 220        return err;
 221}
 222
 223int otx2_config_pause_frm(struct otx2_nic *pfvf)
 224{
 225        struct cgx_pause_frm_cfg *req;
 226        int err;
 227
 228        if (is_otx2_lbkvf(pfvf->pdev))
 229                return 0;
 230
 231        mutex_lock(&pfvf->mbox.lock);
 232        req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
 233        if (!req) {
 234                err = -ENOMEM;
 235                goto unlock;
 236        }
 237
 238        req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
 239        req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
 240        req->set = 1;
 241
 242        err = otx2_sync_mbox_msg(&pfvf->mbox);
 243unlock:
 244        mutex_unlock(&pfvf->mbox.lock);
 245        return err;
 246}
 247
 248int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
 249{
 250        struct otx2_rss_info *rss = &pfvf->hw.rss_info;
 251        struct nix_rss_flowkey_cfg *req;
 252        int err;
 253
 254        mutex_lock(&pfvf->mbox.lock);
 255        req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
 256        if (!req) {
 257                mutex_unlock(&pfvf->mbox.lock);
 258                return -ENOMEM;
 259        }
 260        req->mcam_index = -1; /* Default or reserved index */
 261        req->flowkey_cfg = rss->flowkey_cfg;
 262        req->group = DEFAULT_RSS_CONTEXT_GROUP;
 263
 264        err = otx2_sync_mbox_msg(&pfvf->mbox);
 265        mutex_unlock(&pfvf->mbox.lock);
 266        return err;
 267}
 268
 269int otx2_set_rss_table(struct otx2_nic *pfvf)
 270{
 271        struct otx2_rss_info *rss = &pfvf->hw.rss_info;
 272        struct mbox *mbox = &pfvf->mbox;
 273        struct nix_aq_enq_req *aq;
 274        int idx, err;
 275
 276        mutex_lock(&mbox->lock);
 277        /* Get memory to put this msg */
 278        for (idx = 0; idx < rss->rss_size; idx++) {
 279                aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 280                if (!aq) {
 281                        /* The shared memory buffer can be full.
 282                         * Flush it and retry
 283                         */
 284                        err = otx2_sync_mbox_msg(mbox);
 285                        if (err) {
 286                                mutex_unlock(&mbox->lock);
 287                                return err;
 288                        }
 289                        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 290                        if (!aq) {
 291                                mutex_unlock(&mbox->lock);
 292                                return -ENOMEM;
 293                        }
 294                }
 295
 296                aq->rss.rq = rss->ind_tbl[idx];
 297
 298                /* Fill AQ info */
 299                aq->qidx = idx;
 300                aq->ctype = NIX_AQ_CTYPE_RSS;
 301                aq->op = NIX_AQ_INSTOP_INIT;
 302        }
 303        err = otx2_sync_mbox_msg(mbox);
 304        mutex_unlock(&mbox->lock);
 305        return err;
 306}
 307
 308void otx2_set_rss_key(struct otx2_nic *pfvf)
 309{
 310        struct otx2_rss_info *rss = &pfvf->hw.rss_info;
 311        u64 *key = (u64 *)&rss->key[4];
 312        int idx;
 313
 314        /* 352bit or 44byte key needs to be configured as below
 315         * NIX_LF_RX_SECRETX0 = key<351:288>
 316         * NIX_LF_RX_SECRETX1 = key<287:224>
 317         * NIX_LF_RX_SECRETX2 = key<223:160>
 318         * NIX_LF_RX_SECRETX3 = key<159:96>
 319         * NIX_LF_RX_SECRETX4 = key<95:32>
 320         * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
 321         */
 322        otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
 323                     (u64)(*((u32 *)&rss->key)) << 32);
 324        idx = sizeof(rss->key) / sizeof(u64);
 325        while (idx > 0) {
 326                idx--;
 327                otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
 328        }
 329}
 330
 331int otx2_rss_init(struct otx2_nic *pfvf)
 332{
 333        struct otx2_rss_info *rss = &pfvf->hw.rss_info;
 334        int idx, ret = 0;
 335
 336        rss->rss_size = sizeof(rss->ind_tbl);
 337
 338        /* Init RSS key if it is not setup already */
 339        if (!rss->enable)
 340                netdev_rss_key_fill(rss->key, sizeof(rss->key));
 341        otx2_set_rss_key(pfvf);
 342
 343        if (!netif_is_rxfh_configured(pfvf->netdev)) {
 344                /* Default indirection table */
 345                for (idx = 0; idx < rss->rss_size; idx++)
 346                        rss->ind_tbl[idx] =
 347                                ethtool_rxfh_indir_default(idx,
 348                                                           pfvf->hw.rx_queues);
 349        }
 350        ret = otx2_set_rss_table(pfvf);
 351        if (ret)
 352                return ret;
 353
 354        /* Flowkey or hash config to be used for generating flow tag */
 355        rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
 356                           NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
 357                           NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
 358                           NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
 359
 360        ret = otx2_set_flowkey_cfg(pfvf);
 361        if (ret)
 362                return ret;
 363
 364        rss->enable = true;
 365        return 0;
 366}
 367
 368/* Setup UDP segmentation algorithm in HW */
 369static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
 370{
 371        struct nix_lso_format *field;
 372
 373        field = (struct nix_lso_format *)&lso->fields[0];
 374        lso->field_mask = GENMASK(18, 0);
 375
 376        /* IP's Length field */
 377        field->layer = NIX_TXLAYER_OL3;
 378        /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
 379        field->offset = v4 ? 2 : 4;
 380        field->sizem1 = 1; /* i.e 2 bytes */
 381        field->alg = NIX_LSOALG_ADD_PAYLEN;
 382        field++;
 383
 384        /* No ID field in IPv6 header */
 385        if (v4) {
 386                /* Increment IPID */
 387                field->layer = NIX_TXLAYER_OL3;
 388                field->offset = 4;
 389                field->sizem1 = 1; /* i.e 2 bytes */
 390                field->alg = NIX_LSOALG_ADD_SEGNUM;
 391                field++;
 392        }
 393
 394        /* Update length in UDP header */
 395        field->layer = NIX_TXLAYER_OL4;
 396        field->offset = 4;
 397        field->sizem1 = 1;
 398        field->alg = NIX_LSOALG_ADD_PAYLEN;
 399}
 400
 401/* Setup segmentation algorithms in HW and retrieve algorithm index */
 402void otx2_setup_segmentation(struct otx2_nic *pfvf)
 403{
 404        struct nix_lso_format_cfg_rsp *rsp;
 405        struct nix_lso_format_cfg *lso;
 406        struct otx2_hw *hw = &pfvf->hw;
 407        int err;
 408
 409        mutex_lock(&pfvf->mbox.lock);
 410
 411        /* UDPv4 segmentation */
 412        lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
 413        if (!lso)
 414                goto fail;
 415
 416        /* Setup UDP/IP header fields that HW should update per segment */
 417        otx2_setup_udp_segmentation(lso, true);
 418
 419        err = otx2_sync_mbox_msg(&pfvf->mbox);
 420        if (err)
 421                goto fail;
 422
 423        rsp = (struct nix_lso_format_cfg_rsp *)
 424                        otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
 425        if (IS_ERR(rsp))
 426                goto fail;
 427
 428        hw->lso_udpv4_idx = rsp->lso_format_idx;
 429
 430        /* UDPv6 segmentation */
 431        lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
 432        if (!lso)
 433                goto fail;
 434
 435        /* Setup UDP/IP header fields that HW should update per segment */
 436        otx2_setup_udp_segmentation(lso, false);
 437
 438        err = otx2_sync_mbox_msg(&pfvf->mbox);
 439        if (err)
 440                goto fail;
 441
 442        rsp = (struct nix_lso_format_cfg_rsp *)
 443                        otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
 444        if (IS_ERR(rsp))
 445                goto fail;
 446
 447        hw->lso_udpv6_idx = rsp->lso_format_idx;
 448        mutex_unlock(&pfvf->mbox.lock);
 449        return;
 450fail:
 451        mutex_unlock(&pfvf->mbox.lock);
 452        netdev_info(pfvf->netdev,
 453                    "Failed to get LSO index for UDP GSO offload, disabling\n");
 454        pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
 455}
 456
 457void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 458{
  459        /* Configure CQE interrupt coalescing parameters.
  460         *
  461         * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
  462         * program one less than cq_ecount_wait. cq_time_wait is in
  463         * usecs; convert it to a 100ns count.
  464         */
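             /* E.g. with cq_time_wait = 100 (usecs), cq_qcount_wait = 16 and
              * cq_ecount_wait = 32 (illustrative values only), the register is
              * programmed with (1000 << 48) | (16 << 32) | 31, the time being
              * expressed in 100ns units per the conversion above.
              */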
 465        otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
 466                     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
 467                     ((u64)pfvf->hw.cq_qcount_wait << 32) |
 468                     (pfvf->hw.cq_ecount_wait - 1));
 469}
 470
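     /* Allocate a receive buffer fragment and return its DMA address. On
      * failure a negative errno is returned (cast into the dma_addr_t);
      * callers treat "bufptr <= 0" as the error check (see
      * otx2_pool_refill_task() and the aura/pool init paths below).
      */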
 471dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 472{
 473        dma_addr_t iova;
 474        u8 *buf;
 475
 476        buf = napi_alloc_frag(pool->rbsize);
 477        if (unlikely(!buf))
 478                return -ENOMEM;
 479
 480        iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
 481                                    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 482        if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
 483                page_frag_free(buf);
 484                return -ENOMEM;
 485        }
 486
 487        return iova;
 488}
 489
 490static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 491{
 492        dma_addr_t addr;
 493
 494        local_bh_disable();
 495        addr = __otx2_alloc_rbuf(pfvf, pool);
 496        local_bh_enable();
 497        return addr;
 498}
 499
 500void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
 501{
 502        struct otx2_nic *pfvf = netdev_priv(netdev);
 503
 504        schedule_work(&pfvf->reset_task);
 505}
 506EXPORT_SYMBOL(otx2_tx_timeout);
 507
 508void otx2_get_mac_from_af(struct net_device *netdev)
 509{
 510        struct otx2_nic *pfvf = netdev_priv(netdev);
 511        int err;
 512
 513        err = otx2_hw_get_mac_addr(pfvf, netdev);
 514        if (err)
 515                dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
 516
 517        /* If AF doesn't provide a valid MAC, generate a random one */
 518        if (!is_valid_ether_addr(netdev->dev_addr))
 519                eth_hw_addr_random(netdev);
 520}
 521EXPORT_SYMBOL(otx2_get_mac_from_af);
 522
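     /* Derive the NIX TX link index from the transmit channel base. For CGX
      * channels the CGX and LMAC ids appear to be encoded in the channel
      * number (hence the shifts below), while LBK channels use link 12; this
      * summary is inferred from the bit manipulation, not from the HRM.
      */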
 523static int otx2_get_link(struct otx2_nic *pfvf)
 524{
 525        int link = 0;
 526        u16 map;
 527
 528        /* cgx lmac link */
 529        if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
 530                map = pfvf->hw.tx_chan_base & 0x7FF;
 531                link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
 532        }
 533        /* LBK channel */
 534        if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
 535                link = 12;
 536
 537        return link;
 538}
 539
 540int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
 541{
 542        struct otx2_hw *hw = &pfvf->hw;
 543        struct nix_txschq_config *req;
 544        u64 schq, parent;
 545
 546        req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
 547        if (!req)
 548                return -ENOMEM;
 549
 550        req->lvl = lvl;
 551        req->num_regs = 1;
 552
 553        schq = hw->txschq_list[lvl][0];
  554        /* Set topology etc. configuration */
 555        if (lvl == NIX_TXSCH_LVL_SMQ) {
 556                req->reg[0] = NIX_AF_SMQX_CFG(schq);
 557                req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
 558                                   OTX2_MIN_MTU;
 559
 560                req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
 561                                  (0x2ULL << 36);
 562                req->num_regs++;
 563                /* MDQ config */
 564                parent =  hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
 565                req->reg[1] = NIX_AF_MDQX_PARENT(schq);
 566                req->regval[1] = parent << 16;
 567                req->num_regs++;
 568                /* Set DWRR quantum */
 569                req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
 570                req->regval[2] =  DFLT_RR_QTM;
 571        } else if (lvl == NIX_TXSCH_LVL_TL4) {
 572                parent =  hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
 573                req->reg[0] = NIX_AF_TL4X_PARENT(schq);
 574                req->regval[0] = parent << 16;
 575                req->num_regs++;
 576                req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
 577                req->regval[1] = DFLT_RR_QTM;
 578        } else if (lvl == NIX_TXSCH_LVL_TL3) {
 579                parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
 580                req->reg[0] = NIX_AF_TL3X_PARENT(schq);
 581                req->regval[0] = parent << 16;
 582                req->num_regs++;
 583                req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
 584                req->regval[1] = DFLT_RR_QTM;
 585        } else if (lvl == NIX_TXSCH_LVL_TL2) {
 586                parent =  hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
 587                req->reg[0] = NIX_AF_TL2X_PARENT(schq);
 588                req->regval[0] = parent << 16;
 589
 590                req->num_regs++;
 591                req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
 592                req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
 593
 594                req->num_regs++;
 595                req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
 596                                                        otx2_get_link(pfvf));
 597                /* Enable this queue and backpressure */
 598                req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
 599
 600        } else if (lvl == NIX_TXSCH_LVL_TL1) {
 601                /* Default config for TL1.
 602                 * For VF this is always ignored.
 603                 */
 604
 605                /* Set DWRR quantum */
 606                req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
 607                req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
 608
 609                req->num_regs++;
 610                req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
 611                req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
 612
 613                req->num_regs++;
 614                req->reg[2] = NIX_AF_TL1X_CIR(schq);
 615                req->regval[2] = 0;
 616        }
 617
 618        return otx2_sync_mbox_msg(&pfvf->mbox);
 619}
 620
 621int otx2_txsch_alloc(struct otx2_nic *pfvf)
 622{
 623        struct nix_txsch_alloc_req *req;
 624        int lvl;
 625
 626        /* Get memory to put this msg */
 627        req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
 628        if (!req)
 629                return -ENOMEM;
 630
 631        /* Request one schq per level */
 632        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
 633                req->schq[lvl] = 1;
 634
 635        return otx2_sync_mbox_msg(&pfvf->mbox);
 636}
 637
 638int otx2_txschq_stop(struct otx2_nic *pfvf)
 639{
 640        struct nix_txsch_free_req *free_req;
 641        int lvl, schq, err;
 642
 643        mutex_lock(&pfvf->mbox.lock);
 644        /* Free the transmit schedulers */
 645        free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
 646        if (!free_req) {
 647                mutex_unlock(&pfvf->mbox.lock);
 648                return -ENOMEM;
 649        }
 650
 651        free_req->flags = TXSCHQ_FREE_ALL;
 652        err = otx2_sync_mbox_msg(&pfvf->mbox);
 653        mutex_unlock(&pfvf->mbox.lock);
 654
 655        /* Clear the txschq list */
 656        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 657                for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
 658                        pfvf->hw.txschq_list[lvl][schq] = 0;
 659        }
 660        return err;
 661}
 662
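     /* Wait for each SQ to drain by polling NIX_LF_SQ_OP_STATUS (again via
      * the qidx-in-upper-bits atomic add) until the SQE head and tail
      * pointers extracted below match, or the timeout budget is exhausted.
      */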
 663void otx2_sqb_flush(struct otx2_nic *pfvf)
 664{
 665        int qidx, sqe_tail, sqe_head;
 666        u64 incr, *ptr, val;
 667        int timeout = 1000;
 668
 669        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
 670        for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
 671                incr = (u64)qidx << 32;
 672                while (timeout) {
 673                        val = otx2_atomic64_add(incr, ptr);
 674                        sqe_head = (val >> 20) & 0x3F;
 675                        sqe_tail = (val >> 28) & 0x3F;
 676                        if (sqe_head == sqe_tail)
 677                                break;
 678                        usleep_range(1, 3);
 679                        timeout--;
 680                }
 681        }
 682}
 683
  684/* RED and drop levels of CQ on packet reception.
  685 * For CQ, the level is a measure of emptiness (0x0 = full, 255 = empty).
  686 */
 687#define RQ_PASS_LVL_CQ(skid, qsize)     ((((skid) + 16) * 256) / (qsize))
 688#define RQ_DROP_LVL_CQ(skid, qsize)     (((skid) * 256) / (qsize))
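     /* Worked example with illustrative numbers: for qsize = 1024 and
      * skid = 256, RQ_DROP_LVL_CQ = (256 * 256) / 1024 = 64 and
      * RQ_PASS_LVL_CQ = ((256 + 16) * 256) / 1024 = 68, i.e. thresholds
      * expressed in 256ths of the CQ size.
      */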
 689
  690/* RED and drop levels of AURA for packet reception.
  691 * For AURA, the level is a measure of fullness (0x0 = empty, 255 = full).
  692 * E.g. for an RQ length of 1K and pass/drop levels of 204/230:
  693 * RED accepts pkts if free pointers > 102 and <= 205,
  694 * and drops pkts if free pointers < 102.
  695 */
 696#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
 697#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
 698#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
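     /* With integer arithmetic these evaluate to RQ_BP_LVL_AURA = 38,
      * RQ_PASS_LVL_AURA = 12 and RQ_DROP_LVL_AURA = 2 on the 0-255 aura
      * fullness scale described above.
      */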
 699
  700/* A send skid of 2000 packets is required for a CQ size of 4K CQEs. */
 701#define SEND_CQ_SKID    2000
 702
 703static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
 704{
 705        struct otx2_qset *qset = &pfvf->qset;
 706        struct nix_aq_enq_req *aq;
 707
 708        /* Get memory to put this msg */
 709        aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
 710        if (!aq)
 711                return -ENOMEM;
 712
 713        aq->rq.cq = qidx;
 714        aq->rq.ena = 1;
 715        aq->rq.pb_caching = 1;
 716        aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
 717        aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
 718        aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
 719        aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
 720        aq->rq.qint_idx = 0;
 721        aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
 722        aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
 723        aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
 724        aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
 725        aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
 726        aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
 727
 728        /* Fill AQ info */
 729        aq->qidx = qidx;
 730        aq->ctype = NIX_AQ_CTYPE_RQ;
 731        aq->op = NIX_AQ_INSTOP_INIT;
 732
 733        return otx2_sync_mbox_msg(&pfvf->mbox);
 734}
 735
 736static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
 737{
 738        struct otx2_qset *qset = &pfvf->qset;
 739        struct otx2_snd_queue *sq;
 740        struct nix_aq_enq_req *aq;
 741        struct otx2_pool *pool;
 742        int err;
 743
 744        pool = &pfvf->qset.pool[sqb_aura];
 745        sq = &qset->sq[qidx];
 746        sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
 747        sq->sqe_cnt = qset->sqe_cnt;
 748
 749        err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
 750        if (err)
 751                return err;
 752
 753        err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
 754                         TSO_HEADER_SIZE);
 755        if (err)
 756                return err;
 757
 758        sq->sqe_base = sq->sqe->base;
 759        sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
 760        if (!sq->sg)
 761                return -ENOMEM;
 762
 763        if (pfvf->ptp) {
 764                err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
 765                                 sizeof(*sq->timestamps));
 766                if (err)
 767                        return err;
 768        }
 769
 770        sq->head = 0;
 771        sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
 772        sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
 773        /* Set SQE threshold to 10% of total SQEs */
 774        sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
 775        sq->aura_id = sqb_aura;
 776        sq->aura_fc_addr = pool->fc_addr->base;
 777        sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
 778        sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
 779
 780        sq->stats.bytes = 0;
 781        sq->stats.pkts = 0;
 782
 783        /* Get memory to put this msg */
 784        aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
 785        if (!aq)
 786                return -ENOMEM;
 787
 788        aq->sq.cq = pfvf->hw.rx_queues + qidx;
 789        aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
 790        aq->sq.cq_ena = 1;
 791        aq->sq.ena = 1;
  792        /* Only one SMQ is allocated, map all SQs to that SMQ */
 793        aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
 794        aq->sq.smq_rr_quantum = DFLT_RR_QTM;
 795        aq->sq.default_chan = pfvf->hw.tx_chan_base;
 796        aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
 797        aq->sq.sqb_aura = sqb_aura;
 798        aq->sq.sq_int_ena = NIX_SQINT_BITS;
 799        aq->sq.qint_idx = 0;
  800        /* Due to pipelining, a minimum of 2000 unused SQ CQEs
  801         * needs to be maintained to avoid CQ overflow.
  802         */
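             /* E.g. for sqe_cnt = 4096 (illustrative), cq_limit becomes
              * (2000 * 256) / 4096 = 125; the value appears to be expressed
              * in 256ths of the CQ size, matching the 2000-entry skid above.
              */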
 803        aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
 804
 805        /* Fill AQ info */
 806        aq->qidx = qidx;
 807        aq->ctype = NIX_AQ_CTYPE_SQ;
 808        aq->op = NIX_AQ_INSTOP_INIT;
 809
 810        return otx2_sync_mbox_msg(&pfvf->mbox);
 811}
 812
 813static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 814{
 815        struct otx2_qset *qset = &pfvf->qset;
 816        struct nix_aq_enq_req *aq;
 817        struct otx2_cq_queue *cq;
 818        int err, pool_id;
 819
 820        cq = &qset->cq[qidx];
 821        cq->cq_idx = qidx;
 822        if (qidx < pfvf->hw.rx_queues) {
 823                cq->cq_type = CQ_RX;
 824                cq->cint_idx = qidx;
 825                cq->cqe_cnt = qset->rqe_cnt;
 826        } else {
 827                cq->cq_type = CQ_TX;
 828                cq->cint_idx = qidx - pfvf->hw.rx_queues;
 829                cq->cqe_cnt = qset->sqe_cnt;
 830        }
 831        cq->cqe_size = pfvf->qset.xqe_size;
 832
 833        /* Allocate memory for CQEs */
 834        err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
 835        if (err)
 836                return err;
 837
 838        /* Save CQE CPU base for faster reference */
 839        cq->cqe_base = cq->cqe->base;
  840        /* In case all RQ auras point to a single pool,
  841         * all RX CQs' receive buffer pools also point to that same pool.
  842         */
 843        pool_id = ((cq->cq_type == CQ_RX) &&
 844                   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
 845        cq->rbpool = &qset->pool[pool_id];
 846        cq->refill_task_sched = false;
 847
 848        /* Get memory to put this msg */
 849        aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
 850        if (!aq)
 851                return -ENOMEM;
 852
 853        aq->cq.ena = 1;
 854        aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
 855        aq->cq.caching = 1;
 856        aq->cq.base = cq->cqe->iova;
 857        aq->cq.cint_idx = cq->cint_idx;
 858        aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
 859        aq->cq.qint_idx = 0;
 860        aq->cq.avg_level = 255;
 861
 862        if (qidx < pfvf->hw.rx_queues) {
 863                aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
 864                aq->cq.drop_ena = 1;
 865
 866                /* Enable receive CQ backpressure */
 867                aq->cq.bp_ena = 1;
 868                aq->cq.bpid = pfvf->bpid[0];
 869
  870                /* Set backpressure level the same as the CQ pass level */
 871                aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
 872        }
 873
 874        /* Fill AQ info */
 875        aq->qidx = qidx;
 876        aq->ctype = NIX_AQ_CTYPE_CQ;
 877        aq->op = NIX_AQ_INSTOP_INIT;
 878
 879        return otx2_sync_mbox_msg(&pfvf->mbox);
 880}
 881
 882static void otx2_pool_refill_task(struct work_struct *work)
 883{
 884        struct otx2_cq_queue *cq;
 885        struct otx2_pool *rbpool;
 886        struct refill_work *wrk;
 887        int qidx, free_ptrs = 0;
 888        struct otx2_nic *pfvf;
 889        s64 bufptr;
 890
 891        wrk = container_of(work, struct refill_work, pool_refill_work.work);
 892        pfvf = wrk->pf;
 893        qidx = wrk - pfvf->refill_wrk;
 894        cq = &pfvf->qset.cq[qidx];
 895        rbpool = cq->rbpool;
 896        free_ptrs = cq->pool_ptrs;
 897
 898        while (cq->pool_ptrs) {
 899                bufptr = otx2_alloc_rbuf(pfvf, rbpool);
 900                if (bufptr <= 0) {
  901                        /* Reschedule the work if we failed to free at least
  902                         * half of the pointers, else enable NAPI for this RQ.
  903                         */
 904                        if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
 905                                struct delayed_work *dwork;
 906
 907                                dwork = &wrk->pool_refill_work;
 908                                schedule_delayed_work(dwork,
 909                                                      msecs_to_jiffies(100));
 910                        } else {
 911                                cq->refill_task_sched = false;
 912                        }
 913                        return;
 914                }
 915                otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
 916                cq->pool_ptrs--;
 917        }
 918        cq->refill_task_sched = false;
 919}
 920
 921int otx2_config_nix_queues(struct otx2_nic *pfvf)
 922{
 923        int qidx, err;
 924
 925        /* Initialize RX queues */
 926        for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
 927                u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
 928
 929                err = otx2_rq_init(pfvf, qidx, lpb_aura);
 930                if (err)
 931                        return err;
 932        }
 933
 934        /* Initialize TX queues */
 935        for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
 936                u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 937
 938                err = otx2_sq_init(pfvf, qidx, sqb_aura);
 939                if (err)
 940                        return err;
 941        }
 942
 943        /* Initialize completion queues */
 944        for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
 945                err = otx2_cq_init(pfvf, qidx);
 946                if (err)
 947                        return err;
 948        }
 949
 950        /* Initialize work queue for receive buffer refill */
 951        pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
 952                                        sizeof(struct refill_work), GFP_KERNEL);
 953        if (!pfvf->refill_wrk)
 954                return -ENOMEM;
 955
 956        for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
 957                pfvf->refill_wrk[qidx].pf = pfvf;
 958                INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
 959                                  otx2_pool_refill_task);
 960        }
 961        return 0;
 962}
 963
 964int otx2_config_nix(struct otx2_nic *pfvf)
 965{
 966        struct nix_lf_alloc_req  *nixlf;
 967        struct nix_lf_alloc_rsp *rsp;
 968        int err;
 969
 970        pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
 971
 972        /* Get memory to put this msg */
 973        nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 974        if (!nixlf)
 975                return -ENOMEM;
 976
 977        /* Set RQ/SQ/CQ counts */
 978        nixlf->rq_cnt = pfvf->hw.rx_queues;
 979        nixlf->sq_cnt = pfvf->hw.tx_queues;
 980        nixlf->cq_cnt = pfvf->qset.cq_cnt;
 981        nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
 982        nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
 983        nixlf->xqe_sz = NIX_XQESZ_W16;
  984        /* We don't know the absolute NPA LF idx attached.
  985         * AF will replace 'RVU_DEFAULT_PF_FUNC' with the
  986         * NPA LF attached to this RVU PF/VF.
  987         */
 988        nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
 989        /* Disable alignment pad, enable L2 length check,
 990         * enable L4 TCP/UDP checksum verification.
 991         */
 992        nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);
 993
 994        err = otx2_sync_mbox_msg(&pfvf->mbox);
 995        if (err)
 996                return err;
 997
 998        rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
 999                                                           &nixlf->hdr);
1000        if (IS_ERR(rsp))
1001                return PTR_ERR(rsp);
1002
1003        if (rsp->qints < 1)
1004                return -ENXIO;
1005
1006        return rsp->hdr.rc;
1007}
1008
1009void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
1010{
1011        struct otx2_qset *qset = &pfvf->qset;
1012        struct otx2_hw *hw = &pfvf->hw;
1013        struct otx2_snd_queue *sq;
1014        int sqb, qidx;
1015        u64 iova, pa;
1016
1017        for (qidx = 0; qidx < hw->tx_queues; qidx++) {
1018                sq = &qset->sq[qidx];
1019                if (!sq->sqb_ptrs)
1020                        continue;
1021                for (sqb = 0; sqb < sq->sqb_count; sqb++) {
1022                        if (!sq->sqb_ptrs[sqb])
1023                                continue;
1024                        iova = sq->sqb_ptrs[sqb];
1025                        pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
1026                        dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
1027                                             DMA_FROM_DEVICE,
1028                                             DMA_ATTR_SKIP_CPU_SYNC);
1029                        put_page(virt_to_page(phys_to_virt(pa)));
1030                }
1031                sq->sqb_count = 0;
1032        }
1033}
1034
1035void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
1036{
1037        int pool_id, pool_start = 0, pool_end = 0, size = 0;
1038        u64 iova, pa;
1039
1040        if (type == AURA_NIX_SQ) {
1041                pool_start = otx2_get_pool_idx(pfvf, type, 0);
1042                pool_end =  pool_start + pfvf->hw.sqpool_cnt;
1043                size = pfvf->hw.sqb_size;
1044        }
1045        if (type == AURA_NIX_RQ) {
1046                pool_start = otx2_get_pool_idx(pfvf, type, 0);
1047                pool_end = pfvf->hw.rqpool_cnt;
1048                size = pfvf->rbsize;
1049        }
1050
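             /* RQ buffer pointers were handed to the aura with OTX2_HEAD_ROOM
              * added (see otx2_rq_aura_pool_init() and otx2_pool_refill_task()),
              * so the offset is subtracted again before unmapping below.
              */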
1051        /* Free SQB and RQB pointers from the aura pool */
1052        for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
1053                iova = otx2_aura_allocptr(pfvf, pool_id);
1054                while (iova) {
1055                        if (type == AURA_NIX_RQ)
1056                                iova -= OTX2_HEAD_ROOM;
1057
1058                        pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
1059                        dma_unmap_page_attrs(pfvf->dev, iova, size,
1060                                             DMA_FROM_DEVICE,
1061                                             DMA_ATTR_SKIP_CPU_SYNC);
1062                        put_page(virt_to_page(phys_to_virt(pa)));
1063                        iova = otx2_aura_allocptr(pfvf, pool_id);
1064                }
1065        }
1066}
1067
1068void otx2_aura_pool_free(struct otx2_nic *pfvf)
1069{
1070        struct otx2_pool *pool;
1071        int pool_id;
1072
1073        if (!pfvf->qset.pool)
1074                return;
1075
1076        for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
1077                pool = &pfvf->qset.pool[pool_id];
1078                qmem_free(pfvf->dev, pool->stack);
1079                qmem_free(pfvf->dev, pool->fc_addr);
1080        }
1081        devm_kfree(pfvf->dev, pfvf->qset.pool);
1082        pfvf->qset.pool = NULL;
1083}
1084
1085static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
1086                          int pool_id, int numptrs)
1087{
1088        struct npa_aq_enq_req *aq;
1089        struct otx2_pool *pool;
1090        int err;
1091
1092        pool = &pfvf->qset.pool[pool_id];
1093
1094        /* Allocate memory for HW to update Aura count.
1095         * Alloc one cache line, so that it fits all FC_STYPE modes.
1096         */
1097        if (!pool->fc_addr) {
1098                err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
1099                if (err)
1100                        return err;
1101        }
1102
1103        /* Initialize this aura's context via AF */
1104        aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
1105        if (!aq) {
1106                /* Shared mbox memory buffer is full, flush it and retry */
1107                err = otx2_sync_mbox_msg(&pfvf->mbox);
1108                if (err)
1109                        return err;
1110                aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
1111                if (!aq)
1112                        return -ENOMEM;
1113        }
1114
1115        aq->aura_id = aura_id;
1116        /* Will be filled by AF with correct pool context address */
1117        aq->aura.pool_addr = pool_id;
1118        aq->aura.pool_caching = 1;
1119        aq->aura.shift = ilog2(numptrs) - 8;
1120        aq->aura.count = numptrs;
1121        aq->aura.limit = numptrs;
1122        aq->aura.avg_level = 255;
1123        aq->aura.ena = 1;
1124        aq->aura.fc_ena = 1;
1125        aq->aura.fc_addr = pool->fc_addr->iova;
1126        aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
1127
1128        /* Enable backpressure for RQ aura */
1129        if (aura_id < pfvf->hw.rqpool_cnt) {
1130                aq->aura.bp_ena = 0;
1131                aq->aura.nix0_bpid = pfvf->bpid[0];
1132                /* Set backpressure level for RQ's Aura */
1133                aq->aura.bp = RQ_BP_LVL_AURA;
1134        }
1135
1136        /* Fill AQ info */
1137        aq->ctype = NPA_AQ_CTYPE_AURA;
1138        aq->op = NPA_AQ_INSTOP_INIT;
1139
1140        return 0;
1141}
1142
1143static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
1144                          int stack_pages, int numptrs, int buf_size)
1145{
1146        struct npa_aq_enq_req *aq;
1147        struct otx2_pool *pool;
1148        int err;
1149
1150        pool = &pfvf->qset.pool[pool_id];
1151        /* Alloc memory for stack which is used to store buffer pointers */
1152        err = qmem_alloc(pfvf->dev, &pool->stack,
1153                         stack_pages, pfvf->hw.stack_pg_bytes);
1154        if (err)
1155                return err;
1156
1157        pool->rbsize = buf_size;
1158
1159        /* Initialize this pool's context via AF */
1160        aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
1161        if (!aq) {
1162                /* Shared mbox memory buffer is full, flush it and retry */
1163                err = otx2_sync_mbox_msg(&pfvf->mbox);
1164                if (err) {
1165                        qmem_free(pfvf->dev, pool->stack);
1166                        return err;
1167                }
1168                aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
1169                if (!aq) {
1170                        qmem_free(pfvf->dev, pool->stack);
1171                        return -ENOMEM;
1172                }
1173        }
1174
1175        aq->aura_id = pool_id;
1176        aq->pool.stack_base = pool->stack->iova;
1177        aq->pool.stack_caching = 1;
1178        aq->pool.ena = 1;
1179        aq->pool.buf_size = buf_size / 128;
1180        aq->pool.stack_max_pages = stack_pages;
1181        aq->pool.shift = ilog2(numptrs) - 8;
1182        aq->pool.ptr_start = 0;
1183        aq->pool.ptr_end = ~0ULL;
1184
1185        /* Fill AQ info */
1186        aq->ctype = NPA_AQ_CTYPE_POOL;
1187        aq->op = NPA_AQ_INSTOP_INIT;
1188
1189        return 0;
1190}
1191
1192int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
1193{
1194        int qidx, pool_id, stack_pages, num_sqbs;
1195        struct otx2_qset *qset = &pfvf->qset;
1196        struct otx2_hw *hw = &pfvf->hw;
1197        struct otx2_snd_queue *sq;
1198        struct otx2_pool *pool;
1199        int err, ptr;
1200        s64 bufptr;
1201
 1202        /* Calculate the number of SQBs needed.
 1203         *
 1204         * For a 128-byte SQE and a 4K SQB, 31 SQEs will fit in one SQB.
 1205         * The last SQE is used for pointing to the next SQB.
 1206         */
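             /* E.g. sqb_size = 4096 gives 31 usable SQEs per SQB, and for an
              * illustrative sqe_cnt of 4096 that means (4096 + 31) / 31 = 133
              * SQBs (a ceiling division).
              */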
1207        num_sqbs = (hw->sqb_size / 128) - 1;
1208        num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
1209
1210        /* Get no of stack pages needed */
1211        stack_pages =
1212                (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
1213
1214        for (qidx = 0; qidx < hw->tx_queues; qidx++) {
1215                pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
1216                /* Initialize aura context */
1217                err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
1218                if (err)
1219                        goto fail;
1220
1221                /* Initialize pool context */
1222                err = otx2_pool_init(pfvf, pool_id, stack_pages,
1223                                     num_sqbs, hw->sqb_size);
1224                if (err)
1225                        goto fail;
1226        }
1227
1228        /* Flush accumulated messages */
1229        err = otx2_sync_mbox_msg(&pfvf->mbox);
1230        if (err)
1231                goto fail;
1232
1233        /* Allocate pointers and free them to aura/pool */
1234        for (qidx = 0; qidx < hw->tx_queues; qidx++) {
1235                pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
1236                pool = &pfvf->qset.pool[pool_id];
1237
1238                sq = &qset->sq[qidx];
1239                sq->sqb_count = 0;
1240                sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
1241                if (!sq->sqb_ptrs)
1242                        return -ENOMEM;
1243
1244                for (ptr = 0; ptr < num_sqbs; ptr++) {
1245                        bufptr = otx2_alloc_rbuf(pfvf, pool);
1246                        if (bufptr <= 0)
1247                                return bufptr;
1248                        otx2_aura_freeptr(pfvf, pool_id, bufptr);
1249                        sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
1250                }
1251        }
1252
1253        return 0;
1254fail:
1255        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
1256        otx2_aura_pool_free(pfvf);
1257        return err;
1258}
1259
1260int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
1261{
1262        struct otx2_hw *hw = &pfvf->hw;
1263        int stack_pages, pool_id, rq;
1264        struct otx2_pool *pool;
1265        int err, ptr, num_ptrs;
1266        s64 bufptr;
1267
1268        num_ptrs = pfvf->qset.rqe_cnt;
1269
1270        stack_pages =
1271                (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
1272
1273        for (rq = 0; rq < hw->rx_queues; rq++) {
1274                pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
1275                /* Initialize aura context */
1276                err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
1277                if (err)
1278                        goto fail;
1279        }
1280        for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
1281                err = otx2_pool_init(pfvf, pool_id, stack_pages,
1282                                     num_ptrs, pfvf->rbsize);
1283                if (err)
1284                        goto fail;
1285        }
1286
1287        /* Flush accumulated messages */
1288        err = otx2_sync_mbox_msg(&pfvf->mbox);
1289        if (err)
1290                goto fail;
1291
1292        /* Allocate pointers and free them to aura/pool */
1293        for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
1294                pool = &pfvf->qset.pool[pool_id];
1295                for (ptr = 0; ptr < num_ptrs; ptr++) {
1296                        bufptr = otx2_alloc_rbuf(pfvf, pool);
1297                        if (bufptr <= 0)
1298                                return bufptr;
1299                        otx2_aura_freeptr(pfvf, pool_id,
1300                                          bufptr + OTX2_HEAD_ROOM);
1301                }
1302        }
1303
1304        return 0;
1305fail:
1306        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
1307        otx2_aura_pool_free(pfvf);
1308        return err;
1309}
1310
1311int otx2_config_npa(struct otx2_nic *pfvf)
1312{
1313        struct otx2_qset *qset = &pfvf->qset;
1314        struct npa_lf_alloc_req  *npalf;
1315        struct otx2_hw *hw = &pfvf->hw;
1316        int aura_cnt;
1317
 1318        /* Pool - Stack of free buffer pointers.
 1319         * Aura - Allocates/frees pointers from/to the pool for NIX DMA.
 1320         */
1321
1322        if (!hw->pool_cnt)
1323                return -EINVAL;
1324
1325        qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
1326                                  sizeof(struct otx2_pool), GFP_KERNEL);
1327        if (!qset->pool)
1328                return -ENOMEM;
1329
1330        /* Get memory to put this msg */
1331        npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
1332        if (!npalf)
1333                return -ENOMEM;
1334
1335        /* Set aura and pool counts */
1336        npalf->nr_pools = hw->pool_cnt;
1337        aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
1338        npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
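             /* Illustrative values: pool_cnt = 16 gives aura_cnt = 4 and hence
              * aura_sz = 1, while pool_cnt = 256 gives aura_cnt = 8 and
              * aura_sz = 2; aura_sz is presumably an encoded size (1 = 128
              * auras) understood by the AF, not a raw count.
              */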
1339
1340        return otx2_sync_mbox_msg(&pfvf->mbox);
1341}
1342
1343int otx2_detach_resources(struct mbox *mbox)
1344{
1345        struct rsrc_detach *detach;
1346
1347        mutex_lock(&mbox->lock);
1348        detach = otx2_mbox_alloc_msg_detach_resources(mbox);
1349        if (!detach) {
1350                mutex_unlock(&mbox->lock);
1351                return -ENOMEM;
1352        }
1353
1354        /* detach all */
1355        detach->partial = false;
1356
1357        /* Send detach request to AF */
1358        otx2_mbox_msg_send(&mbox->mbox, 0);
1359        mutex_unlock(&mbox->lock);
1360        return 0;
1361}
1362EXPORT_SYMBOL(otx2_detach_resources);
1363
1364int otx2_attach_npa_nix(struct otx2_nic *pfvf)
1365{
1366        struct rsrc_attach *attach;
1367        struct msg_req *msix;
1368        int err;
1369
1370        mutex_lock(&pfvf->mbox.lock);
1371        /* Get memory to put this msg */
1372        attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
1373        if (!attach) {
1374                mutex_unlock(&pfvf->mbox.lock);
1375                return -ENOMEM;
1376        }
1377
1378        attach->npalf = true;
1379        attach->nixlf = true;
1380
1381        /* Send attach request to AF */
1382        err = otx2_sync_mbox_msg(&pfvf->mbox);
1383        if (err) {
1384                mutex_unlock(&pfvf->mbox.lock);
1385                return err;
1386        }
1387
1388        pfvf->nix_blkaddr = BLKADDR_NIX0;
1389
1390        /* If the platform has two NIX blocks then LF may be
1391         * allocated from NIX1.
1392         */
1393        if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
1394                pfvf->nix_blkaddr = BLKADDR_NIX1;
1395
1396        /* Get NPA and NIX MSIX vector offsets */
1397        msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
1398        if (!msix) {
1399                mutex_unlock(&pfvf->mbox.lock);
1400                return -ENOMEM;
1401        }
1402
1403        err = otx2_sync_mbox_msg(&pfvf->mbox);
1404        if (err) {
1405                mutex_unlock(&pfvf->mbox.lock);
1406                return err;
1407        }
1408        mutex_unlock(&pfvf->mbox.lock);
1409
1410        if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
1411            pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
1412                dev_err(pfvf->dev,
1413                        "RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
1414                return -EINVAL;
1415        }
1416
1417        return 0;
1418}
1419EXPORT_SYMBOL(otx2_attach_npa_nix);
1420
1421void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
1422{
1423        struct hwctx_disable_req *req;
1424
1425        mutex_lock(&mbox->lock);
1426        /* Request AQ to disable this context */
1427        if (npa)
1428                req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
1429        else
1430                req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
1431
1432        if (!req) {
1433                mutex_unlock(&mbox->lock);
1434                return;
1435        }
1436
1437        req->ctype = type;
1438
1439        if (otx2_sync_mbox_msg(mbox))
1440                dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
1441                        __func__);
1442
1443        mutex_unlock(&mbox->lock);
1444}
1445
1446int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
1447{
1448        struct nix_bp_cfg_req *req;
1449
1450        if (enable)
1451                req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
1452        else
1453                req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
1454
1455        if (!req)
1456                return -ENOMEM;
1457
1458        req->chan_base = 0;
1459        req->chan_cnt = 1;
1460        req->bpid_per_chan = 0;
1461
1462        return otx2_sync_mbox_msg(&pfvf->mbox);
1463}
1464
1465/* Mbox message handlers */
1466void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
1467                            struct cgx_stats_rsp *rsp)
1468{
1469        int id;
1470
1471        for (id = 0; id < CGX_RX_STATS_COUNT; id++)
1472                pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
1473        for (id = 0; id < CGX_TX_STATS_COUNT; id++)
1474                pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
1475}
1476
1477void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
1478                                  struct nix_txsch_alloc_rsp *rsp)
1479{
1480        int lvl, schq;
1481
1482        /* Setup transmit scheduler list */
1483        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
1484                for (schq = 0; schq < rsp->schq[lvl]; schq++)
1485                        pf->hw.txschq_list[lvl][schq] =
1486                                rsp->schq_list[lvl][schq];
1487}
1488EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
1489
1490void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
1491                               struct npa_lf_alloc_rsp *rsp)
1492{
1493        pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
1494        pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
1495}
1496EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
1497
1498void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
1499                               struct nix_lf_alloc_rsp *rsp)
1500{
1501        pfvf->hw.sqb_size = rsp->sqb_size;
1502        pfvf->hw.rx_chan_base = rsp->rx_chan_base;
1503        pfvf->hw.tx_chan_base = rsp->tx_chan_base;
1504        pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
1505        pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
1506}
1507EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
1508
1509void mbox_handler_msix_offset(struct otx2_nic *pfvf,
1510                              struct msix_offset_rsp *rsp)
1511{
1512        pfvf->hw.npa_msixoff = rsp->npa_msixoff;
1513        pfvf->hw.nix_msixoff = rsp->nix_msixoff;
1514}
1515EXPORT_SYMBOL(mbox_handler_msix_offset);
1516
1517void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
1518                                struct nix_bp_cfg_rsp *rsp)
1519{
1520        int chan, chan_id;
1521
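             /* Each chan_bpid entry packs a channel id in bits [16:10] and
              * the BPID itself in bits [9:0], per the masking below.
              */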
1522        for (chan = 0; chan < rsp->chan_cnt; chan++) {
1523                chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
1524                pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
1525        }
1526}
1527EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
1528
1529void otx2_free_cints(struct otx2_nic *pfvf, int n)
1530{
1531        struct otx2_qset *qset = &pfvf->qset;
1532        struct otx2_hw *hw = &pfvf->hw;
1533        int irq, qidx;
1534
1535        for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
1536             qidx < n;
1537             qidx++, irq++) {
1538                int vector = pci_irq_vector(pfvf->pdev, irq);
1539
1540                irq_set_affinity_hint(vector, NULL);
1541                free_cpumask_var(hw->affinity_mask[irq]);
1542                free_irq(vector, &qset->napi[qidx]);
1543        }
1544}
1545
1546void otx2_set_cints_affinity(struct otx2_nic *pfvf)
1547{
1548        struct otx2_hw *hw = &pfvf->hw;
1549        int vec, cpu, irq, cint;
1550
1551        vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
1552        cpu = cpumask_first(cpu_online_mask);
1553
1554        /* CQ interrupts */
1555        for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
1556                if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
1557                        return;
1558
1559                cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
1560
1561                irq = pci_irq_vector(pfvf->pdev, vec);
1562                irq_set_affinity_hint(irq, hw->affinity_mask[vec]);
1563
1564                cpu = cpumask_next(cpu, cpu_online_mask);
1565                if (unlikely(cpu >= nr_cpu_ids))
1566                        cpu = 0;
1567        }
1568}
1569
1570#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
1571int __weak                                                              \
1572otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,                \
1573                                struct _req_type *req,                  \
1574                                struct _rsp_type *rsp)                  \
1575{                                                                       \
1576        /* Nothing to do here */                                        \
1577        return 0;                                                       \
1578}                                                                       \
1579EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
1580MBOX_UP_CGX_MESSAGES
1581#undef M
1582