/* dpdk/drivers/common/octeontx2/otx2_sec_idev.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(C) 2020 Marvell International Ltd.
   3 */
   4
   5#include <rte_atomic.h>
   6#include <rte_bus_pci.h>
   7#include <ethdev_driver.h>
   8#include <rte_spinlock.h>
   9
  10#include "otx2_common.h"
  11#include "otx2_sec_idev.h"
  12
  13static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
  14
  15/**
  16 * @internal
  17 * Check if rte_eth_dev is security offload capable otx2_eth_dev
  18 */
  19uint8_t
  20otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev)
  21{
  22        struct rte_pci_device *pci_dev;
  23
  24        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
  25
  26        if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF ||
  27            pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF ||
  28            pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF)
  29                return 1;
  30
  31        return 0;
  32}
  33
  34int
  35otx2_sec_idev_cfg_init(int port_id)
  36{
  37        struct otx2_sec_idev_cfg *cfg;
  38        int i;
  39
  40        cfg = &sec_cfg[port_id];
  41        cfg->tx_cpt_idx = 0;
  42        rte_spinlock_init(&cfg->tx_cpt_lock);
  43
  44        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
  45                cfg->tx_cpt[i].qp = NULL;
  46                rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
  47        }
  48
  49        return 0;
  50}
  51
  52int
  53otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
  54{
  55        struct otx2_sec_idev_cfg *cfg;
  56        int i, ret;
  57
  58        if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
  59                return -EINVAL;
  60
  61        cfg = &sec_cfg[port_id];
  62
  63        /* Find a free slot to save CPT LF */
  64
  65        rte_spinlock_lock(&cfg->tx_cpt_lock);
  66
  67        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
  68                if (cfg->tx_cpt[i].qp == NULL) {
  69                        cfg->tx_cpt[i].qp = qp;
  70                        ret = 0;
  71                        goto unlock;
  72                }
  73        }
  74
  75        ret = -EINVAL;
  76
  77unlock:
  78        rte_spinlock_unlock(&cfg->tx_cpt_lock);
  79        return ret;
  80}
  81
  82int
  83otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
  84{
  85        struct otx2_sec_idev_cfg *cfg;
  86        uint16_t port_id;
  87        int i, ret;
  88
  89        if (qp == NULL)
  90                return -EINVAL;
  91
  92        for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
  93                cfg = &sec_cfg[port_id];
  94
  95                rte_spinlock_lock(&cfg->tx_cpt_lock);
  96
  97                for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
  98                        if (cfg->tx_cpt[i].qp != qp)
  99                                continue;
 100
 101                        /* Don't free if the QP is in use by any sec session */
 102                        if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
 103                                ret = -EBUSY;
 104                        } else {
 105                                cfg->tx_cpt[i].qp = NULL;
 106                                ret = 0;
 107                        }
 108
 109                        goto unlock;
 110                }
 111
 112                rte_spinlock_unlock(&cfg->tx_cpt_lock);
 113        }
 114
 115        return -ENOENT;
 116
 117unlock:
 118        rte_spinlock_unlock(&cfg->tx_cpt_lock);
 119        return ret;
 120}
 121
 122int
 123otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp)
 124{
 125        struct otx2_sec_idev_cfg *cfg;
 126        uint16_t index;
 127        int i, ret;
 128
 129        if (port_id >= OTX2_MAX_INLINE_PORTS || qp == NULL)
 130                return -EINVAL;
 131
 132        cfg = &sec_cfg[port_id];
 133
 134        rte_spinlock_lock(&cfg->tx_cpt_lock);
 135
 136        index = cfg->tx_cpt_idx;
 137
 138        /* Get the next index with valid data */
 139        for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
 140                if (cfg->tx_cpt[index].qp != NULL)
 141                        break;
 142                index = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
 143        }
 144
 145        if (i >= OTX2_MAX_CPT_QP_PER_PORT) {
 146                ret = -EINVAL;
 147                goto unlock;
 148        }
 149
 150        *qp = cfg->tx_cpt[index].qp;
 151        rte_atomic16_inc(&cfg->tx_cpt[index].ref_cnt);
 152
 153        cfg->tx_cpt_idx = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
 154
 155        ret = 0;
 156
 157unlock:
 158        rte_spinlock_unlock(&cfg->tx_cpt_lock);
 159        return ret;
 160}
 161
 162int
 163otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp)
 164{
 165        struct otx2_sec_idev_cfg *cfg;
 166        uint16_t port_id;
 167        int i;
 168
 169        if (qp == NULL)
 170                return -EINVAL;
 171
 172        for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
 173                cfg = &sec_cfg[port_id];
 174                for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
 175                        if (cfg->tx_cpt[i].qp == qp) {
 176                                rte_atomic16_dec(&cfg->tx_cpt[i].ref_cnt);
 177                                return 0;
 178                        }
 179                }
 180        }
 181
 182        return -EINVAL;
 183}
 184