linux/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfp_app.h"
#include "../nfp_abi.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "main.h"

#define NFP_NUM_PRIOS_SYM_NAME  "_abi_pci_dscp_num_prio_%u"
#define NFP_NUM_BANDS_SYM_NAME  "_abi_pci_dscp_num_band_%u"
#define NFP_ACT_MASK_SYM_NAME   "_abi_nfd_out_q_actions_%u"

#define NFP_RED_SUPPORT_SYM_NAME        "_abi_nfd_out_red_offload_%u"

#define NFP_QLVL_SYM_NAME       "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE         16
#define NFP_QLVL_BLOG_BYTES     0
#define NFP_QLVL_BLOG_PKTS      4
#define NFP_QLVL_THRS           8
#define NFP_QLVL_ACT            12

#define NFP_QMSTAT_SYM_NAME     "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE       32
#define NFP_QMSTAT_NON_STO      0
#define NFP_QMSTAT_STO          8
#define NFP_QMSTAT_DROP         16
#define NFP_QMSTAT_ECN          24

#define NFP_Q_STAT_SYM_NAME     "_abi_nfd_rxq_stats%u%s"
#define NFP_Q_STAT_STRIDE       16
#define NFP_Q_STAT_PKTS         0
#define NFP_Q_STAT_BYTES        8

#define NFP_NET_ABM_MBOX_CMD            NFP_NET_CFG_MBOX_SIMPLE_CMD
#define NFP_NET_ABM_MBOX_RET            NFP_NET_CFG_MBOX_SIMPLE_RET
#define NFP_NET_ABM_MBOX_DATALEN        NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_ABM_MBOX_RESERVED       (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
#define NFP_NET_ABM_MBOX_DATA           (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)

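/* Read one per-band, per-queue statistic word from a firmware run-time
 * symbol.  The symbol is an array indexed by
 * band * NFP_NET_MAX_RX_RINGS + queue (relative to the link's queue base),
 * with @stride bytes per entry and the counter at @offset within the entry.
 */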
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
                  unsigned int stride, unsigned int offset, unsigned int band,
                  unsigned int queue, bool is_u64, u64 *res)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        u64 val, sym_offset;
        unsigned int qid;
        u32 val32;
        int err;

        qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

        sym_offset = qid * stride + offset;
        if (is_u64)
                err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
        else
                err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
        if (err) {
                nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
                        alink->id, band, queue, alink->queue_base);
                return err;
        }

        *res = is_u64 ? val : val32;
        return 0;
}

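/* Write the RED drop threshold for one subqueue.  The value is cached in
 * abm->thresholds so unchanged levels are not rewritten to the device.
 */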
int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
        struct nfp_cpp *cpp = abm->app->cpp;
        u64 sym_offset;
        int err;

        __clear_bit(id, abm->threshold_undef);
        if (abm->thresholds[id] == val)
                return 0;

        sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
        err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
        if (err) {
                nfp_err(cpp,
                        "RED offload setting level failed on subqueue %d\n",
                        id);
                return err;
        }

        abm->thresholds[id] = val;
        return 0;
}

int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
                           unsigned int queue, u32 val)
{
        unsigned int threshold;

        threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

        return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
}

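/* Write the queue action for one subqueue, skipping the device write when
 * the cached action already matches.
 */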
int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
                             enum nfp_abm_q_action act)
{
        struct nfp_cpp *cpp = abm->app->cpp;
        u64 sym_offset;
        int err;

        if (abm->actions[id] == act)
                return 0;

        sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
        err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
        if (err) {
                nfp_err(cpp,
                        "RED offload setting action failed on subqueue %d\n",
                        id);
                return err;
        }

        abm->actions[id] = act;
        return 0;
}

int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
                           unsigned int queue, enum nfp_abm_q_action act)
{
        unsigned int qid;

        qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;

        return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
}

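/* This and nfp_abm_ctrl_stat_sto() below sum the corresponding queue-manager
 * counter for @queue across all bands; a failed read makes them return 0
 * rather than a partial sum.
 */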
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
        unsigned int band;
        u64 val, sum = 0;

        for (band = 0; band < alink->abm->num_bands; band++) {
                if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                      NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
                                      band, queue, true, &val))
                        return 0;
                sum += val;
        }

        return sum;
}

u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
        unsigned int band;
        u64 val, sum = 0;

        for (band = 0; band < alink->abm->num_bands; band++) {
                if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                      NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
                                      band, queue, true, &val))
                        return 0;
                sum += val;
        }

        return sum;
}

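/* Basic TX packet/byte counters.  Without per-band priority support in the
 * firmware the per-ring vNIC counters are read directly (band 0 only, other
 * bands report 0); otherwise the counters come from the per-band queue stats
 * symbol.
 */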
static int
nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
                        unsigned int queue, unsigned int off, u64 *val)
{
        if (!nfp_abm_has_prio(alink->abm)) {
                if (!band) {
                        unsigned int id = alink->queue_base + queue;

                        *val = nn_readq(alink->vnic,
                                        NFP_NET_CFG_RXR_STATS(id) + off);
                } else {
                        *val = 0;
                }

                return 0;
        } else {
                return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
                                         NFP_Q_STAT_STRIDE, off, band, queue,
                                         true, val);
        }
}

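/* Fill @stats for one band/queue: TX packets and bytes, current backlog in
 * bytes and packets, drops, and ECN-marked packets (reported as overlimits).
 */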
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
                              unsigned int queue, struct nfp_alink_stats *stats)
{
        int err;

        err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
                                      &stats->tx_pkts);
        if (err)
                return err;

        err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
                                      &stats->tx_bytes);
        if (err)
                return err;

        err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
                                NFP_QLVL_BLOG_BYTES, band, queue, false,
                                &stats->backlog_bytes);
        if (err)
                return err;

        err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
                                NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
                                band, queue, false, &stats->backlog_pkts);
        if (err)
                return err;

        err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
                                band, queue, true, &stats->drops);
        if (err)
                return err;

        return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
                                 band, queue, true, &stats->overlimits);
}

int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
                               unsigned int band, unsigned int queue,
                               struct nfp_alink_xstats *xstats)
{
        int err;

        err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
                                band, queue, true, &xstats->pdrop);
        if (err)
                return err;

        return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
                                 NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
                                 band, queue, true, &xstats->ecn_marked);
}

int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
{
        return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE,
                            NULL, 0, NULL, 0);
}

int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
{
        return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE,
                            NULL, 0, NULL, 0);
}

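/* Push the packed DSCP -> VQ (band) priority map to the firmware through the
 * vNIC mailbox: lock the mailbox, write the data length and the packed words,
 * then issue the reconfig command and unlock.
 */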
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
        const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
        struct nfp_net *nn = alink->vnic;
        unsigned int i;
        int err;

        err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
        if (err)
                return err;

        /* Write data_len and wipe reserved */
        nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
                  alink->abm->prio_map_len);

        for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
                nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
                          packed[i / sizeof(u32)]);

        err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
        if (err)
                nfp_err(alink->abm->app->cpp,
                        "setting DSCP -> VQ map failed with error %d\n", err);
        return err;
}

static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
{
        struct nfp_abm *abm = alink->abm;
        struct nfp_net *nn = alink->vnic;
        unsigned int min_mbox_sz;

        if (!nfp_abm_has_prio(alink->abm))
                return 0;

        min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
        if (nn->tlv_caps.mbox_len < min_mbox_sz) {
                nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
                        nn->tlv_caps.mbox_len, min_mbox_sz);
                return -EINVAL;
        }

        return 0;
}

int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
        alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
        alink->queue_base /= alink->vnic->stride_rx;

        return nfp_abm_ctrl_prio_check_params(alink);
}

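/* Size of the packed priority map in bytes: each priority takes enough bits
 * to encode a band index (rounded up to a power of two), and the total is
 * rounded up to a whole number of 32-bit words.
 */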
static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
{
        unsigned int size;

        size = roundup_pow_of_two(order_base_2(abm->num_bands));
        size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
        size = round_up(size, sizeof(u32));

        return size;
}

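/* Look up a run-time symbol by name and verify it has the expected size. */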
static const struct nfp_rtsym *
nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
{
        const struct nfp_rtsym *sym;

        sym = nfp_rtsym_lookup(pf->rtbl, name);
        if (!sym) {
                nfp_err(pf->cpp, "Symbol '%s' not found\n", name);
                return ERR_PTR(-ENOENT);
        }
        if (nfp_rtsym_size(sym) != size) {
                nfp_err(pf->cpp,
                        "Symbol '%s' wrong size: expected %u got %llu\n",
                        name, size, nfp_rtsym_size(sym));
                return ERR_PTR(-EINVAL);
        }

        return sym;
}

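/* Look up a per-queue symbol.  The expected size is one @size-byte entry per
 * band per RX ring, and the "_per_band" name suffix is used when the firmware
 * advertises priority support.
 */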
static const struct nfp_rtsym *
nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
                          size_t size)
{
        char pf_symbol[64];

        size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
        snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
                 abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");

        return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}

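/* Probe the firmware's ABM capabilities: read the optional RED support, band,
 * priority and action-mask symbols, validate the band/priority combination,
 * and cache the queue level and statistics symbols used by the rest of the
 * offload code.
 */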
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
        struct nfp_pf *pf = abm->app->pf;
        const struct nfp_rtsym *sym;
        int res;

        abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);

        /* Check if Qdisc offloads are supported */
        res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1);
        if (res < 0)
                return res;
        abm->red_support = res;

        /* Read count of prios and prio bands */
        res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
        if (res < 0)
                return res;
        abm->num_bands = res;

        res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
        if (res < 0)
                return res;
        abm->num_prios = res;

        /* Read available actions */
        res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
                                         BIT(NFP_ABM_ACT_MARK_DROP));
        if (res < 0)
                return res;
        abm->action_mask = res;

        abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
        abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));

        /* Check values are sane, U16_MAX is arbitrarily chosen as max */
        if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
            abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
            (abm->num_bands == 1) != (abm->num_prios == 1)) {
                nfp_err(pf->cpp,
                        "invalid priomap description num bands: %u and num prios: %u\n",
                        abm->num_bands, abm->num_prios);
                return -EINVAL;
        }

        /* Find level and stat symbols */
        if (!abm->red_support)
                return 0;

        sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
                                        NFP_QLVL_STRIDE);
        if (IS_ERR(sym))
                return PTR_ERR(sym);
        abm->q_lvls = sym;

        sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
                                        NFP_QMSTAT_STRIDE);
        if (IS_ERR(sym))
                return PTR_ERR(sym);
        abm->qm_stats = sym;

        if (nfp_abm_has_prio(abm)) {
                sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
                                                NFP_Q_STAT_STRIDE);
                if (IS_ERR(sym))
                        return PTR_ERR(sym);
                abm->q_stats = sym;
        }

        return 0;
}