dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2020 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>

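/* Device arguments understood by this PMD: "drv_loopback" selects the
 * hardware loopback Rx/Tx path and "drv_no_prefetch" selects the
 * non-prefetch Rx burst function.
 */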
#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_CHECKSUM |
		DEV_RX_OFFLOAD_SCTP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_RSS_HASH |
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS;

/* Enable timestamp in mbuf; the dynamic timestamp field and flag below
 * are registered with rte_mbuf_dyn_rx_timestamp_register() when the
 * device is configured with the timestamp offload.
 */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

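/* Each extended statistic is identified by the DPNI statistics page and
 * the counter index within that page from which dpni_get_statistics()
 * reads it.
 */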
static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
	{"cgr_reject_frames", 4, 0},
	{"cgr_reject_bytes", 4, 1},
};

static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
	RTE_ETH_FILTER_GET
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

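/* Add (on != 0) or remove (on == 0) the given VLAN ID in the DPNI
 * hardware VLAN filter table.
 */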
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
				       vlan_id, 0, 0, 0);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("Unable to add/remove VLAN %d (err = %d, hwid = %d)",
			      vlan_id, ret, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			return -ENOTSUP;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}

	return ret;
}

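/* Configure a custom (non-standard) VLAN TPID on the DPNI. The standard
 * TPIDs 0x8100 and 0x88A8 are always recognized, so no setup is needed
 * for them.
 */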
static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type __rte_unused,
		      uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* If TPIDs are already configured, remove the first one and retry */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct fsl_mc_io *dpni = dev->process_private;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
	/* Use the same burst size as Rx for best performance */
	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dev_info->speed_capa |= ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G |
				ETH_LINK_SPEED_50G |
				ETH_LINK_SPEED_100G;
	}

	return 0;
}

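/* Report the first enabled Rx offload as a human-readable burst-mode
 * string for rte_eth_rx_burst_mode_get().
 */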
static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
			{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
			{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
			{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
			{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

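/* Tx counterpart of the above: report the first enabled Tx offload as
 * the burst-mode string.
 */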
static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

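/* Allocate the software queue structures for all Rx, Tx and (if enabled)
 * Tx-confirmation queues in one contiguous rte_malloc() region, then set
 * up per-queue dequeue storage, CSCN space and TC/flow identifiers.
 */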
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	if (priv->tx_conf_en)
		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
	else
		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	if (priv->tx_conf_en) {
		/* Setup Tx confirmation queues */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			mc_q->eth_data = dev->data;
			mc_q->tc_index = i;
			mc_q->flow_id = 0;
			priv->tx_conf_vq[i] = mc_q++;
			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
			dpaa2_q->q_storage =
				rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
			if (!dpaa2_q->q_storage)
				goto fail_tx_conf;

			memset(dpaa2_q->q_storage, 0,
			       sizeof(struct queue_storage_info_t));
			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
				goto fail_tx_conf;
		}
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
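/* Error unwind: free each tier in reverse order of allocation; each
 * label falls through into the one below it (Tx-conf -> Tx -> Rx).
 */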
fail_tx_conf:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
		rte_free(dpaa2_q->q_storage);
		priv->tx_conf_vq[i--] = NULL;
	}
	i = priv->nb_tx_queues;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* All queues share one allocation anchored at rx_vq[0] */
	if (priv->rx_vq[0]) {
		/* Clean up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* Clean up Tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		if (priv->tx_conf_en) {
			/* Clean up Tx conf queue storage */
			for (i = 0; i < priv->nb_tx_queues; i++) {
				dpaa2_q = (struct dpaa2_queue *)
						priv->tx_conf_vq[i];
				rte_free(dpaa2_q->q_storage);
			}
		}
		/* Free memory for all queues (Rx + Tx) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret, tc_index;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len
				- RTE_ETHER_CRC_LEN);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set MTU, check config");
				return ret;
			}
			dev->data->mtu =
				dev->data->dev_conf.rxmode.max_rx_pkt_len -
				RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
				VLAN_TAG_SIZE;
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
			ret = dpaa2_setup_flow_dist(dev,
					eth_conf->rx_adv_conf.rss_conf.rss_hf,
					tc_index);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set flow distribution on tc%d. "
					"Check queue config", tc_index);
				return ret;
			}
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum: Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum: Error = %d", ret);
		return ret;
	}

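	/* When RTE_LIBRTE_IEEE1588 is defined, timestamping is required
	 * unconditionally, so the offload check below is compiled out and
	 * the dynamic mbuf timestamp field/flag are always registered.
	 */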
#if !defined(RTE_LIBRTE_IEEE1588)
	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
#endif
	{
		ret = rte_mbuf_dyn_rx_timestamp_register(
				&dpaa2_timestamp_dynfield_offset,
				&dpaa2_timestamp_rx_dynflag);
		if (ret != 0) {
			DPAA2_PMD_ERR("Error to register timestamp field/flag");
			return -rte_errno;
		}
		dpaa2_enable_ts[dev->data->port_id] = true;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum: Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum: Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	return 0;
}

/* Function to set up the Rx flow information. It contains the traffic
 * class ID, flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev=%p, queue=%d, pool=%p, conf=%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
				(void *)dev);
		return -EINVAL;
	}

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = rx_conf->offloads;

	/* Get the flow id from the given VQ id */
	flow_id = dpaa2_q->flow_id;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* Check if a private CGR is available */
	for (i = 0; i < priv->max_cgs; i++) {
		if (!priv->cgid_in_use[i]) {
			priv->cgid_in_use[i] = 1;
			break;
		}
	}

	if (i < priv->max_cgs) {
		options |= DPNI_QUEUE_OPT_SET_CGID;
		cfg.cgid = i;
		dpaa2_q->cgid = cfg.cgid;
	} else {
		dpaa2_q->cgid = 0xff;
	}
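	/* cgid == 0xff means no private CGR could be reserved; the taildrop
	 * setup below then falls back to per-queue byte-based thresholds
	 * instead of CGR frame-count thresholds.
	 */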

	/* If LS2088 or a rev2 device, enable stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The last 6 bits select data, annotation and context stashing
		 * (in the order DS AS CS). Set 01 01 00 (0x14) to enable
		 * 1 line of data and 1 line of annotation.
		 * For LX2, this setting should be 01 00 00 (0x10).
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		dpaa2_q->nb_desc = nb_rx_desc;
		/* A private CGR uses nb_rx_desc as a frame-based tail drop
		 * length; the remaining cases use standard byte-based tail
		 * drop. There is no HW restriction, but the number of CGRs
		 * is limited, hence this restriction is placed.
		 */
		if (dpaa2_q->cgid != 0xff) {
			/* Enable per-Rx-queue congestion control */
			taildrop.threshold = nb_rx_desc;
			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
			taildrop.oal = 0;
			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_CONGESTION_GROUP,
						DPNI_QUEUE_RX,
						dpaa2_q->tc_index,
						dpaa2_q->cgid, &taildrop);
		} else {
			/* Enable per-Rx-queue byte-based tail drop */
			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
			taildrop.oal = CONG_RX_OAL;
			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
						dpaa2_q->tc_index, flow_id,
						&taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	} else { /* Disable tail drop */
		struct dpni_taildrop taildrop = {0};
		DPAA2_PMD_INFO("Tail drop is disabled on queue");

		taildrop.enable = 0;
		if (dpaa2_q->cgid != 0xff) {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
					dpaa2_q->tc_index,
					dpaa2_q->cgid, &taildrop);
		} else {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
		priv->tx_conf_vq[tx_queue_id];
	struct fsl_mc_io *dpni = dev->process_private;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	struct dpni_queue_id qid;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
				(void *)dev);
		return -EINVAL;
	}

	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = tx_conf->offloads;

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"tc_id=%d, flow=%d err=%d",
			tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		if (priv->tx_conf_en)
			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
							    priv->token,
							    DPNI_CONF_AFFINE);
		else
			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
							    priv->token,
							    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX, dpaa2_q->tc_index,
			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

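	/* Unless Tx CGR is disabled, arm congestion notification so the
	 * hardware writes congestion state change notifications (CSCN) into
	 * dpaa2_q->cscn for the Tx path to consult.
	 */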
	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		dpaa2_q->nb_desc = nb_tx_desc;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = nb_tx_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;

	if (priv->tx_conf_en) {
		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
		options = options | DPNI_QUEUE_OPT_USER_CTX;
		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
			      "tc_index=%d, flow=%d err=%d",
			      dpaa2_tx_conf_q->tc_index,
			      dpaa2_tx_conf_q->flow_id, ret);
			return -1;
		}

		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
			return -1;
		}
		dpaa2_tx_conf_q->fqid = qid.fqid;
	}
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q)
{
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
	struct fsl_mc_io *dpni =
		(struct fsl_mc_io *)priv->eth_dev->process_private;
	uint8_t options = 0;
	int ret;
	struct dpni_queue cfg;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	PMD_INIT_FUNC_TRACE();
	if (dpaa2_q->cgid != 0xff) {
		options = DPNI_QUEUE_OPT_CLEAR_CGID;
		cfg.cgid = dpaa2_q->cgid;

		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX,
				     dpaa2_q->tc_index, dpaa2_q->flow_id,
				     options, &cfg);
		if (ret)
			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
					dpaa2_q->fqid, ret);
		priv->cgid_in_use[dpaa2_q->cgid] = 0;
		dpaa2_q->cgid = 0xff;
	}
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

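/* Return the number of frames currently waiting on the given Rx queue,
 * read back through a QBMAN frame-queue state query.
 */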
static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* Notify all applications registered for link status events */
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

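/* Mask in and enable (or disable) the link-change interrupt on the DPNI
 * interrupt line used by the LSC handler above.
 */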
static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpni_queue cfg;
	struct dpni_error_cfg	err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				       dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* On checksum errors, send the frames to the normal path and flag
	 * the error in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.errors |= DPNI_ERROR_PHE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* If the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* Enable vfio intr/eventfd mapping.
		 * Interrupt index 0 is required, so we cannot use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

	return 0;
}

/**
 * This routine disables all traffic on the adapter by taking the link
 * down and disabling the DPNI.
 */
static int
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* Reset the interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* Disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregister the LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return ret;
	}

	/* Clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	return 0;
}

static int
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int i, ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_flow_clean(dev);
	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* Free private queues memory */
	dpaa2_free_rx_tx_queues(dev);
	/* Close the device at the underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
			      ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	dev->process_private = NULL;
	rte_free(dpni);

	for (i = 0; i < MAX_TCS; i++)
		rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);

	if (priv->extract.qos_extract_param)
		rte_free((void *)(size_t)priv->extract.qos_extract_param);

	DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
	return 0;
}

static int
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);

	return ret;
}

static int
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}

	return ret;
}

static int
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);

	return ret;
}

static int
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	/* Multicast promisc must remain on while promiscuous mode is set */
	if (dev->data->promiscuous == 1)
		return 0;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);

	return ret;
}

1403static int
1404dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1405{
1406        int ret;
1407        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1408        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1409        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1410                                + VLAN_TAG_SIZE;
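            /* For example, with a standard 1500 byte MTU, frame_size is
             * 1500 + 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) +
             * 4 (VLAN_TAG_SIZE) = 1522 bytes.
             */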
1411
1412        PMD_INIT_FUNC_TRACE();
1413
1414        if (dpni == NULL) {
1415                DPAA2_PMD_ERR("dpni is NULL");
1416                return -EINVAL;
1417        }
1418
1419        /* check that mtu is within the allowed range */
1420        if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1421                return -EINVAL;
1422
1423        if (frame_size > DPAA2_ETH_MAX_LEN)
1424                dev->data->dev_conf.rxmode.offloads |=
1425                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
1426        else
1427                dev->data->dev_conf.rxmode.offloads &=
1428                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1429
1430        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1431
1432        /* Set the Max Rx frame length as 'mtu' +
1433         * Maximum Ethernet header length
1434         */
1435        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1436                                        frame_size - RTE_ETHER_CRC_LEN);
1437        if (ret) {
1438                DPAA2_PMD_ERR("Setting the max frame length failed");
1439                return -1;
1440        }
1441        DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1442        return 0;
1443}
1444
1445static int
1446dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1447                       struct rte_ether_addr *addr,
1448                       __rte_unused uint32_t index,
1449                       __rte_unused uint32_t pool)
1450{
1451        int ret;
1452        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1453        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1454
1455        PMD_INIT_FUNC_TRACE();
1456
1457        if (dpni == NULL) {
1458                DPAA2_PMD_ERR("dpni is NULL");
1459                return -EINVAL;
1460        }
1461
1462        ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1463                                addr->addr_bytes, 0, 0, 0);
1464        if (ret)
1465                DPAA2_PMD_ERR(
1466                        "Adding the MAC ADDR failed: err = %d", ret);
1467        return ret;
1468}
1469
1470static void
1471dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1472                          uint32_t index)
1473{
1474        int ret;
1475        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1476        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1477        struct rte_eth_dev_data *data = dev->data;
1478        struct rte_ether_addr *macaddr;
1479
1480        PMD_INIT_FUNC_TRACE();
1481
1482        macaddr = &data->mac_addrs[index];
1483
1484        if (dpni == NULL) {
1485                DPAA2_PMD_ERR("dpni is NULL");
1486                return;
1487        }
1488
1489        ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1490                                   priv->token, macaddr->addr_bytes);
1491        if (ret)
1492                DPAA2_PMD_ERR(
1493                        "Removing the MAC ADDR failed: err = %d", ret);
1494}
1495
1496static int
1497dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1498                       struct rte_ether_addr *addr)
1499{
1500        int ret;
1501        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1502        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1503
1504        PMD_INIT_FUNC_TRACE();
1505
1506        if (dpni == NULL) {
1507                DPAA2_PMD_ERR("dpni is NULL");
1508                return -EINVAL;
1509        }
1510
1511        ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1512                                        priv->token, addr->addr_bytes);
1513
1514        if (ret)
1515                DPAA2_PMD_ERR(
1516                        "Setting the MAC ADDR failed %d", ret);
1517
1518        return ret;
1519}
1520
1521static int
1522dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1523                    struct rte_eth_stats *stats)
1524{
1525        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1526        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1527        int32_t  retcode;
1528        uint8_t page0 = 0, page1 = 1, page2 = 2;
1529        union dpni_statistics value;
1530        int i;
1531        struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1532
1533        memset(&value, 0, sizeof(union dpni_statistics));
1534
1535        PMD_INIT_FUNC_TRACE();
1536
1537        if (!dpni) {
1538                DPAA2_PMD_ERR("dpni is NULL");
1539                return -EINVAL;
1540        }
1541
1542        if (!stats) {
1543                DPAA2_PMD_ERR("stats is NULL");
1544                return -EINVAL;
1545        }
1546
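            /* dpni statistics are organized in pages: page_0 carries the
             * ingress frame/byte counters, page_1 the egress ones, and page_2
             * the filter/discard/no-buffer drop counters read below.
             */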
1547        /*Get Counters from page_0*/
1548        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1549                                      page0, 0, &value);
1550        if (retcode)
1551                goto err;
1552
1553        stats->ipackets = value.page_0.ingress_all_frames;
1554        stats->ibytes = value.page_0.ingress_all_bytes;
1555
1556        /*Get Counters from page_1*/
1557        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1558                                      page1, 0, &value);
1559        if (retcode)
1560                goto err;
1561
1562        stats->opackets = value.page_1.egress_all_frames;
1563        stats->obytes = value.page_1.egress_all_bytes;
1564
1565        /*Get Counters from page_2*/
1566        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1567                                      page2, 0, &value);
1568        if (retcode)
1569                goto err;
1570
1571        /* Ingress drop frame count due to configured rules */
1572        stats->ierrors = value.page_2.ingress_filtered_frames;
1573        /* Ingress drop frame count due to error */
1574        stats->ierrors += value.page_2.ingress_discarded_frames;
1575
1576        stats->oerrors = value.page_2.egress_discarded_frames;
1577        stats->imissed = value.page_2.ingress_nobuffer_discards;
1578
1579        /* Fill in per queue stats */
1580        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1581                (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1582                dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1583                dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1584                if (dpaa2_rxq)
1585                        stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1586                if (dpaa2_txq)
1587                        stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1588
1589                /* Byte counting is not implemented */
1590                stats->q_ibytes[i]   = 0;
1591                stats->q_obytes[i]   = 0;
1592        }
1593
1594        return 0;
1595
1596err:
1597        DPAA2_PMD_ERR("Operation not completed: error code = %d", retcode);
1598        return retcode;
1599}
1600
1601static int
1602dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1603                     unsigned int n)
1604{
1605        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1606        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1607        int32_t  retcode;
1608        union dpni_statistics value[5] = {};
1609        unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1610
1611        if (n < num)
1612                return num;
1613
1614        if (xstats == NULL)
1615                return 0;
1616
1617        /* Get Counters from page_0*/
1618        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1619                                      0, 0, &value[0]);
1620        if (retcode)
1621                goto err;
1622
1623        /* Get Counters from page_1*/
1624        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1625                                      1, 0, &value[1]);
1626        if (retcode)
1627                goto err;
1628
1629        /* Get Counters from page_2*/
1630        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1631                                      2, 0, &value[2]);
1632        if (retcode)
1633                goto err;
1634
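            /* page_4 holds the congestion group (CGR reject) counters; they
             * are read through the first CGID that is not in use, if any.
             */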
1635        for (i = 0; i < priv->max_cgs; i++) {
1636                if (!priv->cgid_in_use[i]) {
1637                        /* Get Counters from page_4*/
1638                        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1639                                                      priv->token,
1640                                                      4, 0, &value[4]);
1641                        if (retcode)
1642                                goto err;
1643                        break;
1644                }
1645        }
1646
1647        for (i = 0; i < num; i++) {
1648                xstats[i].id = i;
1649                xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1650                        raw.counter[dpaa2_xstats_strings[i].stats_id];
1651        }
1652        return i;
1653err:
1654        DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1655        return retcode;
1656}
1657
1658static int
1659dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1660                       struct rte_eth_xstat_name *xstats_names,
1661                       unsigned int limit)
1662{
1663        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1664
1665        if (limit < stat_cnt)
1666                return stat_cnt;
1667
1668        if (xstats_names != NULL)
1669                for (i = 0; i < stat_cnt; i++)
1670                        strlcpy(xstats_names[i].name,
1671                                dpaa2_xstats_strings[i].name,
1672                                sizeof(xstats_names[i].name));
1673
1674        return stat_cnt;
1675}
1676
1677static int
1678dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1679                       uint64_t *values, unsigned int n)
1680{
1681        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1682        uint64_t values_copy[stat_cnt];
1683
1684        if (!ids) {
1685                struct dpaa2_dev_priv *priv = dev->data->dev_private;
1686                struct fsl_mc_io *dpni =
1687                        (struct fsl_mc_io *)dev->process_private;
1688                int32_t  retcode;
1689                union dpni_statistics value[5] = {};
1690
1691                if (n < stat_cnt)
1692                        return stat_cnt;
1693
1694                if (!values)
1695                        return 0;
1696
1697                /* Get Counters from page_0*/
1698                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1699                                              0, 0, &value[0]);
1700                if (retcode)
1701                        return 0;
1702
1703                /* Get Counters from page_1*/
1704                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1705                                              1, 0, &value[1]);
1706                if (retcode)
1707                        return 0;
1708
1709                /* Get Counters from page_2*/
1710                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1711                                              2, 0, &value[2]);
1712                if (retcode)
1713                        return 0;
1714
1715                /* Get Counters from page_4*/
1716                retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1717                                              4, 0, &value[4]);
1718                if (retcode)
1719                        return 0;
1720
1721                for (i = 0; i < stat_cnt; i++) {
1722                        values[i] = value[dpaa2_xstats_strings[i].page_id].
1723                                raw.counter[dpaa2_xstats_strings[i].stats_id];
1724                }
1725                return stat_cnt;
1726        }
1727
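            /* ids were supplied: fetch the complete counter set once, then
             * copy out only the requested entries.
             */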
1728        dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1729
1730        for (i = 0; i < n; i++) {
1731                if (ids[i] >= stat_cnt) {
1732                        DPAA2_PMD_ERR("xstats id value isn't valid");
1733                        return -1;
1734                }
1735                values[i] = values_copy[ids[i]];
1736        }
1737        return n;
1738}
1739
1740static int
1741dpaa2_xstats_get_names_by_id(
1742        struct rte_eth_dev *dev,
1743        struct rte_eth_xstat_name *xstats_names,
1744        const uint64_t *ids,
1745        unsigned int limit)
1746{
1747        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1748        struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1749
1750        if (!ids)
1751                return dpaa2_xstats_get_names(dev, xstats_names, limit);
1752
1753        dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1754
1755        for (i = 0; i < limit; i++) {
1756                if (ids[i] >= stat_cnt) {
1757                        DPAA2_PMD_ERR("xstats id value isn't valid");
1758                        return -1;
1759                }
1760                strlcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name, sizeof(xstats_names[i].name));
1761        }
1762        return limit;
1763}
1764
1765static int
1766dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1767{
1768        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1769        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1770        int retcode;
1771        int i;
1772        struct dpaa2_queue *dpaa2_q;
1773
1774        PMD_INIT_FUNC_TRACE();
1775
1776        if (dpni == NULL) {
1777                DPAA2_PMD_ERR("dpni is NULL");
1778                return -EINVAL;
1779        }
1780
1781        retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1782        if (retcode)
1783                goto error;
1784
1785        /* Reset the per queue stats in dpaa2_queue structure */
1786        for (i = 0; i < priv->nb_rx_queues; i++) {
1787                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1788                if (dpaa2_q)
1789                        dpaa2_q->rx_pkts = 0;
1790        }
1791
1792        for (i = 0; i < priv->nb_tx_queues; i++) {
1793                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1794                if (dpaa2_q)
1795                        dpaa2_q->tx_pkts = 0;
1796        }
1797
1798        return 0;
1799
1800error:
1801        DPAA2_PMD_ERR("Operation not completed: error code = %d", retcode);
1802        return retcode;
1803}
1804
1805/* return 0 means link status changed, -1 means not changed */
1806static int
1807dpaa2_dev_link_update(struct rte_eth_dev *dev,
1808                        int wait_to_complete __rte_unused)
1809{
1810        int ret;
1811        struct dpaa2_dev_priv *priv = dev->data->dev_private;
1812        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1813        struct rte_eth_link link;
1814        struct dpni_link_state state = {0};
1815
1816        if (dpni == NULL) {
1817                DPAA2_PMD_ERR("dpni is NULL");
1818                return 0;
1819        }
1820
1821        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1822        if (ret < 0) {
1823                DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1824                return -1;
1825        }
1826
1827        memset(&link, 0, sizeof(struct rte_eth_link));
1828        link.link_status = state.up;
1829        link.link_speed = state.rate;
1830
1831        if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1832                link.link_duplex = ETH_LINK_HALF_DUPLEX;
1833        else
1834                link.link_duplex = ETH_LINK_FULL_DUPLEX;
1835
1836        ret = rte_eth_linkstatus_set(dev, &link);
1837        if (ret == -1)
1838                DPAA2_PMD_DEBUG("No change in status");
1839        else
1840                DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1841                               link.link_status ? "Up" : "Down");
1842
1843        return ret;
1844}
1845
1846/**
1847 * Toggle the DPNI to enable, if not already enabled.
1848 * This is not strictly PHY up/down - it is more of logical toggling.
1849 */
1850static int
1851dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1852{
1853        int ret = -EINVAL;
1854        struct dpaa2_dev_priv *priv;
1855        struct fsl_mc_io *dpni;
1856        int en = 0;
1857        struct dpni_link_state state = {0};
1858
1859        priv = dev->data->dev_private;
1860        dpni = (struct fsl_mc_io *)dev->process_private;
1861
1862        if (dpni == NULL) {
1863                DPAA2_PMD_ERR("dpni is NULL");
1864                return ret;
1865        }
1866
1867        /* Check if DPNI is currently enabled */
1868        ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1869        if (ret) {
1870                /* Unable to obtain dpni status; Not continuing */
1871                DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1872                return -EINVAL;
1873        }
1874
1875        /* Enable link if not already enabled */
1876        if (!en) {
1877                ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1878                if (ret) {
1879                        DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1880                        return -EINVAL;
1881                }
1882        }
1883        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1884        if (ret < 0) {
1885                DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1886                return -1;
1887        }
1888
1889        /* changing tx burst function to start enqueues */
1890        dev->tx_pkt_burst = dpaa2_dev_tx;
1891        dev->data->dev_link.link_status = state.up;
1892        dev->data->dev_link.link_speed = state.rate;
1893
1894        if (state.up)
1895                DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1896        else
1897                DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1898        return ret;
1899}
1900
1901/**
1902 * Toggle the DPNI to disable, if not already disabled.
1903 * This is not strictly PHY up/down - it is more of logical toggling.
1904 */
1905static int
1906dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1907{
1908        int ret = -EINVAL;
1909        struct dpaa2_dev_priv *priv;
1910        struct fsl_mc_io *dpni;
1911        int dpni_enabled = 0;
1912        int retries = 10;
1913
1914        PMD_INIT_FUNC_TRACE();
1915
1916        priv = dev->data->dev_private;
1917        dpni = (struct fsl_mc_io *)dev->process_private;
1918
1919        if (dpni == NULL) {
1920                DPAA2_PMD_ERR("Device has not yet been configured");
1921                return ret;
1922        }
1923
1924        /* changing tx burst function to avoid any more enqueues */
1925        dev->tx_pkt_burst = dummy_dev_tx;
1926
1927        /* Loop while dpni_disable() attempts to drain the egress FQs
1928         * and confirm them back to us.
1929         */
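            /* With retries = 10 and a 100 ms delay per iteration, this waits
             * up to roughly one second for the egress FQs to drain.
             */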
1930        do {
1931                ret = dpni_disable(dpni, 0, priv->token);
1932                if (ret) {
1933                        DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1934                        return ret;
1935                }
1936                ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1937                if (ret) {
1938                        DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1939                        return ret;
1940                }
1941                if (dpni_enabled)
1942                        /* Allow the MC some slack */
1943                        rte_delay_us(100 * 1000);
1944        } while (dpni_enabled && --retries);
1945
1946        if (!retries) {
1947                DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
1948                /* TODO: we may have to manually clean up queues. */
1950        } else {
1951                DPAA2_PMD_INFO("Port %d Link DOWN successful",
1952                               dev->data->port_id);
1953        }
1954
1955        dev->data->dev_link.link_status = 0;
1956
1957        return ret;
1958}
1959
1960static int
1961dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1962{
1963        int ret = -EINVAL;
1964        struct dpaa2_dev_priv *priv;
1965        struct fsl_mc_io *dpni;
1966        struct dpni_link_state state = {0};
1967
1968        PMD_INIT_FUNC_TRACE();
1969
1970        priv = dev->data->dev_private;
1971        dpni = (struct fsl_mc_io *)dev->process_private;
1972
1973        if (dpni == NULL || fc_conf == NULL) {
1974                DPAA2_PMD_ERR("device not configured");
1975                return ret;
1976        }
1977
1978        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1979        if (ret) {
1980                DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1981                return ret;
1982        }
1983
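            /* The cases below implement this mapping of the pause options to
             * an rte_eth flow control mode:
             *   PAUSE=1 ASYM_PAUSE=0 -> RTE_FC_FULL
             *   PAUSE=1 ASYM_PAUSE=1 -> RTE_FC_RX_PAUSE
             *   PAUSE=0 ASYM_PAUSE=1 -> RTE_FC_TX_PAUSE
             *   PAUSE=0 ASYM_PAUSE=0 -> RTE_FC_NONE
             */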
1984        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1985        if (state.options & DPNI_LINK_OPT_PAUSE) {
1986                /* DPNI_LINK_OPT_PAUSE set
1987                 *  if ASYM_PAUSE not set,
1988                 *      RX Side flow control (handle received Pause frame)
1989                 *      TX side flow control (send Pause frame)
1990                 *  if ASYM_PAUSE set,
1991                 *      RX Side flow control (handle received Pause frame)
1992                 *      No TX side flow control (send Pause frame disabled)
1993                 */
1994                if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1995                        fc_conf->mode = RTE_FC_FULL;
1996                else
1997                        fc_conf->mode = RTE_FC_RX_PAUSE;
1998        } else {
1999                /* DPNI_LINK_OPT_PAUSE not set
2000                 *  if ASYM_PAUSE set,
2001                 *      TX side flow control (send Pause frame)
2002                 *      No RX side flow control (No action on pause frame rx)
2003                 *  if ASYM_PAUSE not set,
2004                 *      Flow control disabled
2005                 */
2006                if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
2007                        fc_conf->mode = RTE_FC_TX_PAUSE;
2008                else
2009                        fc_conf->mode = RTE_FC_NONE;
2010        }
2011
2012        return ret;
2013}
2014
2015static int
2016dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2017{
2018        int ret = -EINVAL;
2019        struct dpaa2_dev_priv *priv;
2020        struct fsl_mc_io *dpni;
2021        struct dpni_link_state state = {0};
2022        struct dpni_link_cfg cfg = {0};
2023
2024        PMD_INIT_FUNC_TRACE();
2025
2026        priv = dev->data->dev_private;
2027        dpni = (struct fsl_mc_io *)dev->process_private;
2028
2029        if (dpni == NULL) {
2030                DPAA2_PMD_ERR("dpni is NULL");
2031                return ret;
2032        }
2033
2034        /* The current link state must be obtained before setting fc_conf,
2035         * as the MC would return an error if the rate, autoneg or duplex
2036         * values differ from the current ones.
2037         */
2038        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2039        if (ret) {
2040                DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2041                return -1;
2042        }
2043
2044        /* Disable link before setting configuration */
2045        dpaa2_dev_set_link_down(dev);
2046
2047        /* Based on fc_conf, update cfg */
2048        cfg.rate = state.rate;
2049        cfg.options = state.options;
2050
2051        /* update cfg with fc_conf */
2052        switch (fc_conf->mode) {
2053        case RTE_FC_FULL:
2054                /* Full flow control;
2055                 * OPT_PAUSE set, ASYM_PAUSE not set
2056                 */
2057                cfg.options |= DPNI_LINK_OPT_PAUSE;
2058                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2059                break;
2060        case RTE_FC_TX_PAUSE:
2061                /* Enable TX flow control
2062                 * OPT_PAUSE not set;
2063                 * ASYM_PAUSE set;
2064                 */
2065                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2066                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2067                break;
2068        case RTE_FC_RX_PAUSE:
2069                /* Enable RX flow control
2070                 * OPT_PAUSE set
2071                 * ASYM_PAUSE set
2072                 */
2073                cfg.options |= DPNI_LINK_OPT_PAUSE;
2074                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2075                break;
2076        case RTE_FC_NONE:
2077                /* Disable Flow control
2078                 * OPT_PAUSE not set
2079                 * ASYM_PAUSE not set
2080                 */
2081                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2082                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2083                break;
2084        default:
2085                DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2086                              fc_conf->mode);
2087                return -1;
2088        }
2089
2090        ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2091        if (ret)
2092                DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2093                              ret);
2094
2095        /* Enable link */
2096        dpaa2_dev_set_link_up(dev);
2097
2098        return ret;
2099}
2100
2101static int
2102dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2103                          struct rte_eth_rss_conf *rss_conf)
2104{
2105        struct rte_eth_dev_data *data = dev->data;
2106        struct dpaa2_dev_priv *priv = data->dev_private;
2107        struct rte_eth_conf *eth_conf = &data->dev_conf;
2108        int ret, tc_index;
2109
2110        PMD_INIT_FUNC_TRACE();
2111
2112        if (rss_conf->rss_hf) {
2113                for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2114                        ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2115                                tc_index);
2116                        if (ret) {
2117                                DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2118                                        tc_index);
2119                                return ret;
2120                        }
2121                }
2122        } else {
2123                for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2124                        ret = dpaa2_remove_flow_dist(dev, tc_index);
2125                        if (ret) {
2126                                DPAA2_PMD_ERR(
2127                                        "Unable to remove flow dist on tc%d",
2128                                        tc_index);
2129                                return ret;
2130                        }
2131                }
2132        }
2133        eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2134        return 0;
2135}
2136
2137static int
2138dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2139                            struct rte_eth_rss_conf *rss_conf)
2140{
2141        struct rte_eth_dev_data *data = dev->data;
2142        struct rte_eth_conf *eth_conf = &data->dev_conf;
2143
2144        /* dpaa2 does not support rss_key, so its length is reported as 0 */
2145        rss_conf->rss_key_len = 0;
2146        rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2147        return 0;
2148}
2149
2150int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2151                int eth_rx_queue_id,
2152                struct dpaa2_dpcon_dev *dpcon,
2153                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2154{
2155        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2156        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2157        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2158        uint8_t flow_id = dpaa2_ethq->flow_id;
2159        struct dpni_queue cfg;
2160        uint8_t options, priority;
2161        int ret;
2162
2163        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2164                dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2165        else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2166                dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2167        else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2168                dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2169        else
2170                return -EINVAL;
2171
2172        priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2173                   (dpcon->num_priorities - 1);
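            /* Worked example (integer arithmetic): ev.priority = 255
             * (RTE_EVENT_DEV_PRIORITY_LOWEST) with num_priorities = 2 gives
             * priority = (255 / 255) * (2 - 1) = 1.
             */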
2174
2175        memset(&cfg, 0, sizeof(struct dpni_queue));
2176        options = DPNI_QUEUE_OPT_DEST;
2177        cfg.destination.type = DPNI_DEST_DPCON;
2178        cfg.destination.id = dpcon->dpcon_id;
2179        cfg.destination.priority = priority;
2180
2181        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2182                options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2183                cfg.destination.hold_active = 1;
2184        }
2185
2186        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2187                        !eth_priv->en_ordered) {
2188                struct opr_cfg ocfg;
2189
2190                /* Restoration window size = 256 frames */
2191                ocfg.oprrws = 3;
2192                /* Restoration window size = 512 frames for LX2 */
2193                if (dpaa2_svr_family == SVR_LX2160A)
2194                        ocfg.oprrws = 4;
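                    /* The two settings above are consistent with a window of
                     * 2^(5 + oprrws) frames (3 -> 256, 4 -> 512).
                     */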
2195                /* Auto advance NESN window enabled */
2196                ocfg.oa = 1;
2197                /* Late arrival window size disabled */
2198                ocfg.olws = 0;
2199                /* ORL resource exhaustion advance NESN disabled */
2200                ocfg.oeane = 0;
2201                /* Loose ordering enabled */
2202                ocfg.oloe = 1;
2203                eth_priv->en_loose_ordered = 1;
2204                /* Strict ordering enabled if explicitly set */
2205                if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2206                        ocfg.oloe = 0;
2207                        eth_priv->en_loose_ordered = 0;
2208                }
2209
2210                ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2211                                   dpaa2_ethq->tc_index, flow_id,
2212                                   OPR_OPT_CREATE, &ocfg);
2213                if (ret) {
2214                        DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
2215                        return ret;
2216                }
2217
2218                eth_priv->en_ordered = 1;
2219        }
2220
2221        options |= DPNI_QUEUE_OPT_USER_CTX;
2222        cfg.user_context = (size_t)(dpaa2_ethq);
2223
2224        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2225                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
2226        if (ret) {
2227                DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2228                return ret;
2229        }
2230
2231        memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2232
2233        return 0;
2234}
2235
2236int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2237                int eth_rx_queue_id)
2238{
2239        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2240        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2241        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2242        uint8_t flow_id = dpaa2_ethq->flow_id;
2243        struct dpni_queue cfg;
2244        uint8_t options;
2245        int ret;
2246
2247        memset(&cfg, 0, sizeof(struct dpni_queue));
2248        options = DPNI_QUEUE_OPT_DEST;
2249        cfg.destination.type = DPNI_DEST_NONE;
2250
2251        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2252                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
2253        if (ret)
2254                DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2255
2256        return ret;
2257}
2258
2259static inline int
2260dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2261{
2262        unsigned int i;
2263
2264        for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2265                if (dpaa2_supported_filter_ops[i] == filter_op)
2266                        return 0;
2267        }
2268        return -ENOTSUP;
2269}
2270
2271static int
2272dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2273                    enum rte_filter_type filter_type,
2274                    enum rte_filter_op filter_op,
2275                    void *arg)
2276{
2277        int ret = 0;
2278
2279        if (!dev)
2280                return -ENODEV;
2281
2282        switch (filter_type) {
2283        case RTE_ETH_FILTER_GENERIC:
2284                if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2285                        ret = -ENOTSUP;
2286                        break;
2287                }
2288                *(const void **)arg = &dpaa2_flow_ops;
2289                dpaa2_filter_type |= filter_type;
2290                break;
2291        default:
2292                RTE_LOG(ERR, PMD, "Filter type (%d) not supported\n",
2293                        filter_type);
2294                ret = -ENOTSUP;
2295                break;
2296        }
2297        return ret;
2298}
2299
2300static void
2301dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2302        struct rte_eth_rxq_info *qinfo)
2303{
2304        struct dpaa2_queue *rxq;
2305
2306        rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2307
2308        qinfo->mp = rxq->mb_pool;
2309        qinfo->scattered_rx = dev->data->scattered_rx;
2310        qinfo->nb_desc = rxq->nb_desc;
2311
2312        qinfo->conf.rx_free_thresh = 1;
2313        qinfo->conf.rx_drop_en = 1;
2314        qinfo->conf.rx_deferred_start = 0;
2315        qinfo->conf.offloads = rxq->offloads;
2316}
2317
2318static void
2319dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2320        struct rte_eth_txq_info *qinfo)
2321{
2322        struct dpaa2_queue *txq;
2323
2324        txq = dev->data->tx_queues[queue_id];
2325
2326        qinfo->nb_desc = txq->nb_desc;
2327        qinfo->conf.tx_thresh.pthresh = 0;
2328        qinfo->conf.tx_thresh.hthresh = 0;
2329        qinfo->conf.tx_thresh.wthresh = 0;
2330
2331        qinfo->conf.tx_free_thresh = 0;
2332        qinfo->conf.tx_rs_thresh = 0;
2333        qinfo->conf.offloads = txq->offloads;
2334        qinfo->conf.tx_deferred_start = 0;
2335}
2336
2337static struct eth_dev_ops dpaa2_ethdev_ops = {
2338        .dev_configure    = dpaa2_eth_dev_configure,
2339        .dev_start            = dpaa2_dev_start,
2340        .dev_stop             = dpaa2_dev_stop,
2341        .dev_close            = dpaa2_dev_close,
2342        .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2343        .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2344        .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2345        .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2346        .dev_set_link_up      = dpaa2_dev_set_link_up,
2347        .dev_set_link_down    = dpaa2_dev_set_link_down,
2348        .link_update       = dpaa2_dev_link_update,
2349        .stats_get             = dpaa2_dev_stats_get,
2350        .xstats_get            = dpaa2_dev_xstats_get,
2351        .xstats_get_by_id     = dpaa2_xstats_get_by_id,
2352        .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2353        .xstats_get_names      = dpaa2_xstats_get_names,
2354        .stats_reset       = dpaa2_dev_stats_reset,
2355        .xstats_reset         = dpaa2_dev_stats_reset,
2356        .fw_version_get    = dpaa2_fw_version_get,
2357        .dev_infos_get     = dpaa2_dev_info_get,
2358        .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2359        .mtu_set           = dpaa2_dev_mtu_set,
2360        .vlan_filter_set      = dpaa2_vlan_filter_set,
2361        .vlan_offload_set     = dpaa2_vlan_offload_set,
2362        .vlan_tpid_set        = dpaa2_vlan_tpid_set,
2363        .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2364        .rx_queue_release  = dpaa2_dev_rx_queue_release,
2365        .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2366        .tx_queue_release  = dpaa2_dev_tx_queue_release,
2367        .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2368        .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2369        .flow_ctrl_get        = dpaa2_flow_ctrl_get,
2370        .flow_ctrl_set        = dpaa2_flow_ctrl_set,
2371        .mac_addr_add         = dpaa2_dev_add_mac_addr,
2372        .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2373        .mac_addr_set         = dpaa2_dev_set_mac_addr,
2374        .rss_hash_update      = dpaa2_dev_rss_hash_update,
2375        .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2376        .filter_ctrl          = dpaa2_dev_flow_ctrl,
2377        .rxq_info_get         = dpaa2_rxq_info_get,
2378        .txq_info_get         = dpaa2_txq_info_get,
2379#if defined(RTE_LIBRTE_IEEE1588)
2380        .timesync_enable      = dpaa2_timesync_enable,
2381        .timesync_disable     = dpaa2_timesync_disable,
2382        .timesync_read_time   = dpaa2_timesync_read_time,
2383        .timesync_write_time  = dpaa2_timesync_write_time,
2384        .timesync_adjust_time = dpaa2_timesync_adjust_time,
2385        .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2386        .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2387#endif
2388};
2389
2390/* Populate the MAC address, preferring the physically available one
2391 * (u-boot/firmware) over the one set by higher layers such as MC (restool).
2392 * The resolved primary MAC address is written to mac_entry.
2393 */
2394static int
2395populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2396                  struct rte_ether_addr *mac_entry)
2397{
2398        int ret;
2399        struct rte_ether_addr phy_mac, prime_mac;
2400
2401        memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2402        memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2403
2404        /* Get the physical device MAC address */
2405        ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2406                                     phy_mac.addr_bytes);
2407        if (ret) {
2408                DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2409                goto cleanup;
2410        }
2411
2412        ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2413                                        prime_mac.addr_bytes);
2414        if (ret) {
2415                DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2416                goto cleanup;
2417        }
2418
2419        /* Now that both MACs have been obtained, do:
2420         *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2421         *     and return phy
2422         *  If empty_mac(phy), return prime.
2423         *  if both are empty, create random MAC, set as prime and return
2424         */
2425        if (!rte_is_zero_ether_addr(&phy_mac)) {
2426                /* If the addresses are not same, overwrite prime */
2427                if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2428                        ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2429                                                        priv->token,
2430                                                        phy_mac.addr_bytes);
2431                        if (ret) {
2432                                DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2433                                              ret);
2434                                goto cleanup;
2435                        }
2436                        memcpy(&prime_mac, &phy_mac,
2437                                sizeof(struct rte_ether_addr));
2438                }
2439        } else if (rte_is_zero_ether_addr(&prime_mac)) {
2440                /* In case phys and prime, both are zero, create random MAC */
2441                rte_eth_random_addr(prime_mac.addr_bytes);
2442                ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2443                                                priv->token,
2444                                                prime_mac.addr_bytes);
2445                if (ret) {
2446                        DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2447                        goto cleanup;
2448                }
2449        }
2450
2451        /* prime_mac is the final MAC address */
2452        memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2453        return 0;
2454
2455cleanup:
2456        return -1;
2457}
2458
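    /* devargs check handler: an option is treated as enabled only when its
     * value is the literal string "1".
     */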
2459static int
2460check_devargs_handler(__rte_unused const char *key, const char *value,
2461                      __rte_unused void *opaque)
2462{
2463        if (strcmp(value, "1"))
2464                return -1;
2465
2466        return 0;
2467}
2468
2469static int
2470dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2471{
2472        struct rte_kvargs *kvlist;
2473
2474        if (!devargs)
2475                return 0;
2476
2477        kvlist = rte_kvargs_parse(devargs->args, NULL);
2478        if (!kvlist)
2479                return 0;
2480
2481        if (!rte_kvargs_count(kvlist, key)) {
2482                rte_kvargs_free(kvlist);
2483                return 0;
2484        }
2485
2486        if (rte_kvargs_process(kvlist, key,
2487                               check_devargs_handler, NULL) < 0) {
2488                rte_kvargs_free(kvlist);
2489                return 0;
2490        }
2491        rte_kvargs_free(kvlist);
2492
2493        return 1;
2494}
2495
2496static int
2497dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2498{
2499        struct rte_device *dev = eth_dev->device;
2500        struct rte_dpaa2_device *dpaa2_dev;
2501        struct fsl_mc_io *dpni_dev;
2502        struct dpni_attr attr;
2503        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2504        struct dpni_buffer_layout layout;
2505        int ret, hw_id, i;
2506
2507        PMD_INIT_FUNC_TRACE();
2508
2509        dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2510        if (!dpni_dev) {
2511                DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2512                return -1;
2513        }
2514        dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2515        eth_dev->process_private = (void *)dpni_dev;
2516
2517        /* For secondary processes, the primary has done all the work */
2518        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2519                /* In case of secondary, only burst and ops API need to be
2520                 * plugged.
2521                 */
2522                eth_dev->dev_ops = &dpaa2_ethdev_ops;
2523                eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2524                if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2525                        eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2526                else if (dpaa2_get_devargs(dev->devargs,
2527                                        DRIVER_NO_PREFETCH_MODE))
2528                        eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2529                else
2530                        eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2531                eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2532                return 0;
2533        }
2534
2535        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2536
2537        hw_id = dpaa2_dev->object_id;
2538        ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2539        if (ret) {
2540                DPAA2_PMD_ERR(
2541                             "Failure in opening dpni@%d with err code %d",
2542                             hw_id, ret);
2543                rte_free(dpni_dev);
2544                return -1;
2545        }
2546
2547        /* Clean the device first */
2548        ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2549        if (ret) {
2550                DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2551                              hw_id, ret);
2552                goto init_err;
2553        }
2554
2555        ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2556        if (ret) {
2557                DPAA2_PMD_ERR(
2558                             "Failure in get dpni@%d attribute, err code %d",
2559                             hw_id, ret);
2560                goto init_err;
2561        }
2562
2563        priv->num_rx_tc = attr.num_rx_tcs;
2564        priv->qos_entries = attr.qos_entries;
2565        priv->fs_entries = attr.fs_entries;
2566        priv->dist_queues = attr.num_queues;
2567
2568        /* only if the custom CG is enabled */
2569        if (attr.options & DPNI_OPT_CUSTOM_CG)
2570                priv->max_cgs = attr.num_cgs;
2571        else
2572                priv->max_cgs = 0;
2573
2574        for (i = 0; i < priv->max_cgs; i++)
2575                priv->cgid_in_use[i] = 0;
2576
2577        for (i = 0; i < attr.num_rx_tcs; i++)
2578                priv->nb_rx_queues += attr.num_queues;
2579
2580        /* Using number of TX TCs as number of TX queues */
2581        priv->nb_tx_queues = attr.num_tx_tcs;
2582
2583        DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2584                        priv->num_rx_tc, priv->nb_rx_queues,
2585                        priv->nb_tx_queues, priv->max_cgs);
2586
2587        priv->hw = dpni_dev;
2588        priv->hw_id = hw_id;
2589        priv->options = attr.options;
2590        priv->max_mac_filters = attr.mac_filter_entries;
2591        priv->max_vlan_filters = attr.vlan_filter_entries;
2592        priv->flags = 0;
2593#if defined(RTE_LIBRTE_IEEE1588)
2594        priv->tx_conf_en = 1;
2595#else
2596        priv->tx_conf_en = 0;
2597#endif
2598
2599        /* Allocate memory for hardware structure for queues */
2600        ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2601        if (ret) {
2602                DPAA2_PMD_ERR("Queue allocation Failed");
2603                goto init_err;
2604        }
2605
2606        /* Allocate memory for storing MAC addresses.
2607         * Table of mac_filter_entries size is allocated so that RTE ether lib
2608         * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2609         */
2610        eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2611                RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2612        if (eth_dev->data->mac_addrs == NULL) {
2613                DPAA2_PMD_ERR(
2614                   "Failed to allocate %d bytes needed to store MAC addresses",
2615                   RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2616                ret = -ENOMEM;
2617                goto init_err;
2618        }
2619
2620        ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2621        if (ret) {
2622                DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2623                rte_free(eth_dev->data->mac_addrs);
2624                eth_dev->data->mac_addrs = NULL;
2625                goto init_err;
2626        }
2627
2628        /* ... tx buffer layout ... */
2629        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2630        if (priv->tx_conf_en) {
2631                layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2632                                 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2633                layout.pass_timestamp = true;
2634        } else {
2635                layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2636        }
2637        layout.pass_frame_status = 1;
2638        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2639                                     DPNI_QUEUE_TX, &layout);
2640        if (ret) {
2641                DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2642                goto init_err;
2643        }
2644
2645        /* ... tx-conf and error buffer layout ... */
2646        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2647        if (priv->tx_conf_en) {
2648                layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2649                                 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2650                layout.pass_timestamp = true;
2651        } else {
2652                layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2653        }
2654        layout.pass_frame_status = 1;
2655        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2656                                     DPNI_QUEUE_TX_CONFIRM, &layout);
2657        if (ret) {
2658                DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2659                             ret);
2660                goto init_err;
2661        }
2662
2663        eth_dev->dev_ops = &dpaa2_ethdev_ops;
2664
2665        if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2666                eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2667                DPAA2_PMD_INFO("Loopback mode");
2668        } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2669                eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2670                DPAA2_PMD_INFO("No Prefetch mode");
2671        } else {
2672                eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2673        }
2674        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2675
2676        /* Init fields w.r.t. classification */
2677        memset(&priv->extract.qos_key_extract, 0,
2678                sizeof(struct dpaa2_key_extract));
2679        priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2680        if (!priv->extract.qos_extract_param) {
2681                DPAA2_PMD_ERR("Memory allocation failed for flow classification");
2682                ret = -ENOMEM;
2683                goto init_err;
2684        }
2685        priv->extract.qos_key_extract.key_info.ipv4_src_offset =
2686                IP_ADDRESS_OFFSET_INVALID;
2687        priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
2688                IP_ADDRESS_OFFSET_INVALID;
2689        priv->extract.qos_key_extract.key_info.ipv6_src_offset =
2690                IP_ADDRESS_OFFSET_INVALID;
2691        priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
2692                IP_ADDRESS_OFFSET_INVALID;
2693
2694        for (i = 0; i < MAX_TCS; i++) {
2695                memset(&priv->extract.tc_key_extract[i], 0,
2696                        sizeof(struct dpaa2_key_extract));
2697                priv->extract.tc_extract_param[i] =
2698                        (size_t)rte_malloc(NULL, 256, 64);
2699                if (!priv->extract.tc_extract_param[i]) {
2700                        DPAA2_PMD_ERR("Memory allocation failed for flow classification");
2701                        ret = -ENOMEM;
2702                        goto init_err;
2703                }
2704                priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
2705                        IP_ADDRESS_OFFSET_INVALID;
2706                priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
2707                        IP_ADDRESS_OFFSET_INVALID;
2708                priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
2709                        IP_ADDRESS_OFFSET_INVALID;
2710                priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
2711                        IP_ADDRESS_OFFSET_INVALID;
2712        }
2713
2714        ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2715                                        RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2716                                        + VLAN_TAG_SIZE);
2717        if (ret) {
2718                DPAA2_PMD_ERR("Unable to set MTU, check config");
2719                goto init_err;
2720        }
2721
2722        /* TODO: To enable soft parser support, the DPAA2 driver needs to
2723         * integrate with an external entity that provides the byte code
2724         * for the software sequence, which is then offloaded to the H/W
2725         * using the MC interface. Currently it is assumed that the driver
2726         * has obtained the byte code by some means and offloads it to H/W.
2727         */
2728        if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2729                WRIOP_SS_INITIALIZER(priv);
2730                ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2731                if (ret < 0) {
2732                        DPAA2_PMD_ERR("Error (%d) in loading soft parser",
2733                                      ret);
2734                        return ret;
2735                }
2736
2737                ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2738                                                         DPNI_SS_INGRESS);
2739                if (ret < 0) {
2740                        DPAA2_PMD_ERR("Error (%d) in enabling soft parser",
2741                                      ret);
2742                        return ret;
2743                }
2744        }
2745        RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2746        return 0;
2747init_err:
2748        dpaa2_dev_close(eth_dev);
2749
2750        return ret;
2751}
2752
2753static int
2754rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2755                struct rte_dpaa2_device *dpaa2_dev)
2756{
2757        struct rte_eth_dev *eth_dev;
2758        struct dpaa2_dev_priv *dev_priv;
2759        int diag;
2760
2761        if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2762                RTE_PKTMBUF_HEADROOM) {
2763                DPAA2_PMD_ERR(
2764                "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2765                RTE_PKTMBUF_HEADROOM,
2766                DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2767
2768                return -1;
2769        }
2770
2771        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2772                eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2773                if (!eth_dev)
2774                        return -ENODEV;
2775                dev_priv = rte_zmalloc("ethdev private structure",
2776                                       sizeof(struct dpaa2_dev_priv),
2777                                       RTE_CACHE_LINE_SIZE);
2778                if (dev_priv == NULL) {
2779                        DPAA2_PMD_CRIT(
2780                                "Unable to allocate memory for private data");
2781                        rte_eth_dev_release_port(eth_dev);
2782                        return -ENOMEM;
2783                }
2784                eth_dev->data->dev_private = (void *)dev_priv;
2785                /* Store a pointer to eth_dev in dev_private */
2786                dev_priv->eth_dev = eth_dev;
2787                dev_priv->tx_conf_en = 0;
2788        } else {
2789                eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2790                if (!eth_dev) {
2791                        DPAA2_PMD_DEBUG("returning enodev");
2792                        return -ENODEV;
2793                }
2794        }
2795
2796        eth_dev->device = &dpaa2_dev->device;
2797
2798        dpaa2_dev->eth_dev = eth_dev;
2799        eth_dev->data->rx_mbuf_alloc_failed = 0;
2800
2801        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2802                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2803
2804        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2805
2806        /* Invoke PMD device initialization function */
2807        diag = dpaa2_dev_init(eth_dev);
2808        if (diag == 0) {
2809                rte_eth_dev_probing_finish(eth_dev);
2810                return 0;
2811        }
2812
2813        rte_eth_dev_release_port(eth_dev);
2814        return diag;
2815}
2816
2817static int
2818rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2819{
2820        struct rte_eth_dev *eth_dev;
2821        int ret;
2822
2823        eth_dev = dpaa2_dev->eth_dev;
2824        dpaa2_dev_close(eth_dev);
2825        ret = rte_eth_dev_release_port(eth_dev);
2826
2827        return ret;
2828}
2829
2830static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2831        .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2832        .drv_type = DPAA2_ETH,
2833        .probe = rte_dpaa2_probe,
2834        .remove = rte_dpaa2_remove,
2835};
2836
2837RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2838RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2839                DRIVER_LOOPBACK_MODE "=<int> "
2840                DRIVER_NO_PREFETCH_MODE "=<int>");
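    /* Illustrative usage (assuming a dpni object named dpni.1 on the fslmc
     * bus): enable the loopback or no-prefetch Rx path via EAL devargs, e.g.
     *   -a fslmc:dpni.1,drv_loopback=1
     *   -a fslmc:dpni.1,drv_no_prefetch=1
     */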
2841RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
2842