dpdk/drivers/net/dpaa/dpaa_rxtx.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 *
   3 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
   4 *   Copyright 2017,2019 NXP
   5 *
   6 */
   7
   8/* System headers */
   9#include <inttypes.h>
  10#include <unistd.h>
  11#include <stdio.h>
  12#include <limits.h>
  13#include <sched.h>
  14#include <pthread.h>
  15
  16#include <rte_byteorder.h>
  17#include <rte_common.h>
  18#include <rte_interrupts.h>
  19#include <rte_log.h>
  20#include <rte_debug.h>
  21#include <rte_pci.h>
  22#include <rte_atomic.h>
  23#include <rte_branch_prediction.h>
  24#include <rte_memory.h>
  25#include <rte_tailq.h>
  26#include <rte_eal.h>
  27#include <rte_alarm.h>
  28#include <rte_ether.h>
  29#include <rte_ethdev_driver.h>
  30#include <rte_malloc.h>
  31#include <rte_ring.h>
  32#include <rte_ip.h>
  33#include <rte_tcp.h>
  34#include <rte_udp.h>
  35#include <rte_net.h>
  36#include <rte_eventdev.h>
  37
  38#include "dpaa_ethdev.h"
  39#include "dpaa_rxtx.h"
  40#include <rte_dpaa_bus.h>
  41#include <dpaa_mempool.h>
  42
  43#include <qman.h>
  44#include <fsl_usd.h>
  45#include <fsl_qman.h>
  46#include <fsl_bman.h>
  47#include <dpaa_of.h>
  48#include <netcfg.h>
  49
  50#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
  51        do { \
  52                (_fd)->cmd = 0; \
  53                (_fd)->opaque_addr = 0; \
  54                (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
  55                (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
  56                (_fd)->opaque |= (_mbuf)->pkt_len; \
  57                (_fd)->addr = (_mbuf)->buf_iova; \
  58                (_fd)->bpid = _bpid; \
  59        } while (0)
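     /*
      * Layout note (as implied by the macro above and the Rx path below): for
      * a contiguous frame descriptor, the 32-bit 'opaque' word packs, from the
      * top bits down, the FD format, the buffer offset and the frame length
      * (cf. fd->length20):
      *
      *   opaque = (QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT) |
      *            (data_off     << DPAA_FD_OFFSET_SHIFT) |
      *            pkt_len;
      *
      * dpaa_eth_fd_to_mbuf() reverses this using DPAA_FD_FORMAT_MASK/SHIFT,
      * DPAA_FD_OFFSET_MASK/SHIFT and DPAA_FD_LENGTH_MASK.
      */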
  60
  61#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
  62#define DISPLAY_PRINT printf
  63static void dpaa_display_frame_info(const struct qm_fd *fd,
  64                        uint32_t fqid, bool rx)
  65{
  66        int ii;
  67        char *ptr;
  68        struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr);
  69        uint8_t format;
  70
  71        if (!fd->status) {
   72                /* Only display frames with errors (non-zero status). */
  73                return;
  74        }
  75
  76        format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
  77                                DPAA_FD_FORMAT_SHIFT;
  78
  79        DISPLAY_PRINT("fqid %d bpid %d addr 0x%lx, format %d\r\n",
  80                      fqid, fd->bpid, (unsigned long)fd->addr, fd->format);
  81        DISPLAY_PRINT("off %d, len %d stat 0x%x\r\n",
  82                      fd->offset, fd->length20, fd->status);
  83        if (rx) {
  84                ptr = (char *)&annot->parse;
  85                DISPLAY_PRINT("RX parser result:\r\n");
  86                for (ii = 0; ii < (int)sizeof(struct dpaa_eth_parse_results_t);
  87                        ii++) {
  88                        DISPLAY_PRINT("%02x ", ptr[ii]);
  89                        if (((ii + 1) % 16) == 0)
  90                                DISPLAY_PRINT("\n");
  91                }
  92                DISPLAY_PRINT("\n");
  93        }
  94
  95        if (unlikely(format == qm_fd_sg)) {
  96                /*TBD:S/G display: to be implemented*/
  97                return;
  98        }
  99
 100        DISPLAY_PRINT("Frame payload:\r\n");
 101        ptr = (char *)annot;
 102        ptr += fd->offset;
 103        for (ii = 0; ii < fd->length20; ii++) {
 104                DISPLAY_PRINT("%02x ", ptr[ii]);
 105                if (((ii + 1) % 16) == 0)
  106                        DISPLAY_PRINT("\n");
 107        }
 108        DISPLAY_PRINT("\n");
 109}
 110#else
 111#define dpaa_display_frame_info(a, b, c)
 112#endif
 113
 114static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
 115                                     uint64_t prs __rte_unused)
 116{
 117        DPAA_DP_LOG(DEBUG, "Slow parsing");
 118        /*TBD:XXX: to be implemented*/
 119}
 120
 121static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
 122{
 123        struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
 124        uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
 125
 126        DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
 127
 128        m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD |
 129                PKT_RX_L4_CKSUM_GOOD;
 130
 131        switch (prs) {
 132        case DPAA_PKT_TYPE_IPV4:
 133                m->packet_type = RTE_PTYPE_L2_ETHER |
 134                        RTE_PTYPE_L3_IPV4;
 135                break;
 136        case DPAA_PKT_TYPE_IPV6:
 137                m->packet_type = RTE_PTYPE_L2_ETHER |
 138                        RTE_PTYPE_L3_IPV6;
 139                break;
 140        case DPAA_PKT_TYPE_ETHER:
 141                m->packet_type = RTE_PTYPE_L2_ETHER;
 142                break;
 143        case DPAA_PKT_TYPE_IPV4_FRAG:
 144        case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
 145        case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
 146        case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
 147                m->packet_type = RTE_PTYPE_L2_ETHER |
 148                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
 149                break;
 150        case DPAA_PKT_TYPE_IPV6_FRAG:
 151        case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
 152        case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
 153        case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
 154                m->packet_type = RTE_PTYPE_L2_ETHER |
 155                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
 156                break;
 157        case DPAA_PKT_TYPE_IPV4_EXT:
 158                m->packet_type = RTE_PTYPE_L2_ETHER |
 159                        RTE_PTYPE_L3_IPV4_EXT;
 160                break;
 161        case DPAA_PKT_TYPE_IPV6_EXT:
 162                m->packet_type = RTE_PTYPE_L2_ETHER |
 163                        RTE_PTYPE_L3_IPV6_EXT;
 164                break;
 165        case DPAA_PKT_TYPE_IPV4_TCP:
 166                m->packet_type = RTE_PTYPE_L2_ETHER |
 167                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
 168                break;
 169        case DPAA_PKT_TYPE_IPV6_TCP:
 170                m->packet_type = RTE_PTYPE_L2_ETHER |
 171                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
 172                break;
 173        case DPAA_PKT_TYPE_IPV4_UDP:
 174                m->packet_type = RTE_PTYPE_L2_ETHER |
 175                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
 176                break;
 177        case DPAA_PKT_TYPE_IPV6_UDP:
 178                m->packet_type = RTE_PTYPE_L2_ETHER |
 179                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
 180                break;
 181        case DPAA_PKT_TYPE_IPV4_EXT_UDP:
 182                m->packet_type = RTE_PTYPE_L2_ETHER |
 183                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
 184                break;
 185        case DPAA_PKT_TYPE_IPV6_EXT_UDP:
 186                m->packet_type = RTE_PTYPE_L2_ETHER |
 187                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
 188                break;
 189        case DPAA_PKT_TYPE_IPV4_EXT_TCP:
 190                m->packet_type = RTE_PTYPE_L2_ETHER |
 191                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
 192                break;
 193        case DPAA_PKT_TYPE_IPV6_EXT_TCP:
 194                m->packet_type = RTE_PTYPE_L2_ETHER |
 195                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
 196                break;
 197        case DPAA_PKT_TYPE_IPV4_SCTP:
 198                m->packet_type = RTE_PTYPE_L2_ETHER |
 199                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
 200                break;
 201        case DPAA_PKT_TYPE_IPV6_SCTP:
 202                m->packet_type = RTE_PTYPE_L2_ETHER |
 203                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
 204                break;
 205        case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
 206        case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
 207                m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_BAD;
 208                break;
 209        case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
 210        case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
 211        case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
 212        case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
 213                m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_L4_CKSUM_BAD;
 214                break;
 215        case DPAA_PKT_TYPE_NONE:
 216                m->packet_type = 0;
 217                break;
 218        /* More switch cases can be added */
 219        default:
 220                dpaa_slow_parsing(m, prs);
 221        }
 222
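             /*
              * struct rte_mbuf keeps l2_len in the lowest bits of tx_offload
              * with l3_len packed just above it, so the first IP header offset
              * from the parse results lands in l2_len and the IP-to-L4
              * distance, shifted by DPAA_PKT_L3_LEN_SHIFT, lands in l3_len.
              */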
 223        m->tx_offload = annot->parse.ip_off[0];
 224        m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
 225                                        << DPAA_PKT_L3_LEN_SHIFT;
 226
 227        /* Set the hash values */
 228        m->hash.rss = (uint32_t)(annot->hash);
 229
 230        /* Check if Vlan is present */
 231        if (prs & DPAA_PARSE_VLAN_MASK)
 232                m->ol_flags |= PKT_RX_VLAN;
 233        /* Packet received without stripping the vlan */
 234}
 235
 236static inline void dpaa_checksum(struct rte_mbuf *mbuf)
 237{
 238        struct rte_ether_hdr *eth_hdr =
 239                rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
 240        char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
 241        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
 242        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
 243
 244        DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
 245
 246        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
 247            ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 248            RTE_PTYPE_L3_IPV4_EXT)) {
 249                ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
 250                ipv4_hdr->hdr_checksum = 0;
 251                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
 252        } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 253                   RTE_PTYPE_L3_IPV6) ||
 254                   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 255                   RTE_PTYPE_L3_IPV6_EXT))
 256                ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
 257
 258        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 259                struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
 260                                          mbuf->l3_len);
 261                tcp_hdr->cksum = 0;
 262                if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
 263                        tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
 264                                                               tcp_hdr);
 265                else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
 266                        tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
 267                                                               tcp_hdr);
 268        } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
 269                   RTE_PTYPE_L4_UDP) {
 270                struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
 271                                                             mbuf->l3_len);
 272                udp_hdr->dgram_cksum = 0;
 273                if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
 274                        udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
 275                                                                     udp_hdr);
 276                else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
 277                        udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
 278                                                                     udp_hdr);
 279        }
 280}
 281
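     /*
      * Hardware Tx checksum path: instead of computing checksums in software,
      * fill a parse-results structure in the packet headroom (L3/L4 type and
      * offsets) and set the RPD/DTC bits in the FD command word so that FMAN
      * computes the L3 (and, for TCP/UDP, L4) checksums on transmit.
      */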
 282static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
 283                                         struct qm_fd *fd, char *prs_buf)
 284{
 285        struct dpaa_eth_parse_results_t *prs;
 286
 287        DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
 288
 289        prs = GET_TX_PRS(prs_buf);
 290        prs->l3r = 0;
 291        prs->l4r = 0;
 292        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
 293           ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 294           RTE_PTYPE_L3_IPV4_EXT))
 295                prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
 296        else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 297                   RTE_PTYPE_L3_IPV6) ||
 298                 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
 299                RTE_PTYPE_L3_IPV6_EXT))
 300                prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
 301
 302        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
 303                prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
 304        else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
 305                prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
 306
 307        prs->ip_off[0] = mbuf->l2_len;
 308        prs->l4_off = mbuf->l3_len + mbuf->l2_len;
 309        /* Enable L3 (and L4, if TCP or UDP) HW checksum*/
 310        fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
 311}
 312
 313static inline void
 314dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
 315{
 316        if (!mbuf->packet_type) {
 317                struct rte_net_hdr_lens hdr_lens;
 318
 319                mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
 320                                RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
 321                                | RTE_PTYPE_L4_MASK);
 322                mbuf->l2_len = hdr_lens.l2_len;
 323                mbuf->l3_len = hdr_lens.l3_len;
 324        }
 325        if (mbuf->data_off < (DEFAULT_TX_ICEOF +
 326            sizeof(struct dpaa_eth_parse_results_t))) {
  327                DPAA_DP_LOG(DEBUG, "Checksum offload error: "
  328                        "not enough headroom for correct "
  329                        "checksum offload, so calculating "
  330                        "the checksum in software.");
 331                dpaa_checksum(mbuf);
 332        } else {
 333                dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
 334        }
 335}
 336
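     /*
      * Convert an S/G frame descriptor into an mbuf chain without copying:
      * each S/G entry points into a buffer allocated from a DPAA mempool, so
      * the matching mbuf header is recovered by stepping back meta_data_size
      * bytes from the buffer's virtual address. The buffer holding the S/G
      * table itself ('temp') is freed once the chain has been assembled.
      */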
 337struct rte_mbuf *
 338dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 339{
 340        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
 341        struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
 342        struct qm_sg_entry *sgt, *sg_temp;
 343        void *vaddr, *sg_vaddr;
 344        int i = 0;
 345        uint16_t fd_offset = fd->offset;
 346
 347        vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
 348        if (!vaddr) {
 349                DPAA_PMD_ERR("unable to convert physical address");
 350                return NULL;
 351        }
 352        sgt = vaddr + fd_offset;
 353        sg_temp = &sgt[i++];
 354        hw_sg_to_cpu(sg_temp);
 355        temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
 356        sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));
 357
 358        first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 359                                                bp_info->meta_data_size);
 360        first_seg->data_off = sg_temp->offset;
 361        first_seg->data_len = sg_temp->length;
 362        first_seg->pkt_len = sg_temp->length;
 363        rte_mbuf_refcnt_set(first_seg, 1);
 364
 365        first_seg->port = ifid;
 366        first_seg->nb_segs = 1;
 367        first_seg->ol_flags = 0;
 368        prev_seg = first_seg;
 369        while (i < DPAA_SGT_MAX_ENTRIES) {
 370                sg_temp = &sgt[i++];
 371                hw_sg_to_cpu(sg_temp);
 372                sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
 373                                             qm_sg_entry_get64(sg_temp));
 374                cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 375                                                      bp_info->meta_data_size);
 376                cur_seg->data_off = sg_temp->offset;
 377                cur_seg->data_len = sg_temp->length;
 378                first_seg->pkt_len += sg_temp->length;
 379                first_seg->nb_segs += 1;
 380                rte_mbuf_refcnt_set(cur_seg, 1);
 381                prev_seg->next = cur_seg;
 382                if (sg_temp->final) {
 383                        cur_seg->next = NULL;
 384                        break;
 385                }
 386                prev_seg = cur_seg;
 387        }
 388        DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
 389                        first_seg->pkt_len, first_seg->nb_segs);
 390
 391        dpaa_eth_packet_info(first_seg, vaddr);
 392        rte_pktmbuf_free_seg(temp);
 393
 394        return first_seg;
 395}
 396
 397static inline struct rte_mbuf *
 398dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 399{
 400        struct rte_mbuf *mbuf;
 401        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
 402        void *ptr;
 403        uint8_t format =
 404                (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
 405        uint16_t offset;
 406        uint32_t length;
 407
 408        if (unlikely(format == qm_fd_sg))
 409                return dpaa_eth_sg_to_mbuf(fd, ifid);
 410
 411        offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
 412        length = fd->opaque & DPAA_FD_LENGTH_MASK;
 413
 414        DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);
 415
 416        /* Ignoring case when format != qm_fd_contig */
 417        ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
 418
 419        mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
 420        /* Prefetch the Parse results and packet data to L1 */
 421        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
 422
 423        mbuf->data_off = offset;
 424        mbuf->data_len = length;
 425        mbuf->pkt_len = length;
 426
 427        mbuf->port = ifid;
 428        mbuf->nb_segs = 1;
 429        mbuf->ol_flags = 0;
 430        mbuf->next = NULL;
 431        rte_mbuf_refcnt_set(mbuf, 1);
 432        dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 433
 434        return mbuf;
 435}
 436
 437uint16_t
 438dpaa_free_mbuf(const struct qm_fd *fd)
 439{
 440        struct rte_mbuf *mbuf;
 441        struct dpaa_bp_info *bp_info;
 442        uint8_t format;
 443        void *ptr;
 444
 445        bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
 446        format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
 447        if (unlikely(format == qm_fd_sg)) {
 448                struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
 449                struct qm_sg_entry *sgt, *sg_temp;
 450                void *vaddr, *sg_vaddr;
 451                int i = 0;
 452                uint16_t fd_offset = fd->offset;
 453
 454                vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
 455                if (!vaddr) {
 456                        DPAA_PMD_ERR("unable to convert physical address");
 457                        return -1;
 458                }
 459                sgt = vaddr + fd_offset;
 460                sg_temp = &sgt[i++];
 461                hw_sg_to_cpu(sg_temp);
 462                temp = (struct rte_mbuf *)
 463                        ((char *)vaddr - bp_info->meta_data_size);
 464                sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
 465                                                qm_sg_entry_get64(sg_temp));
 466
 467                first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 468                                                bp_info->meta_data_size);
 469                first_seg->nb_segs = 1;
 470                prev_seg = first_seg;
 471                while (i < DPAA_SGT_MAX_ENTRIES) {
 472                        sg_temp = &sgt[i++];
 473                        hw_sg_to_cpu(sg_temp);
 474                        sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
 475                                                qm_sg_entry_get64(sg_temp));
 476                        cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
 477                                                      bp_info->meta_data_size);
 478                        first_seg->nb_segs += 1;
 479                        prev_seg->next = cur_seg;
 480                        if (sg_temp->final) {
 481                                cur_seg->next = NULL;
 482                                break;
 483                        }
 484                        prev_seg = cur_seg;
 485                }
 486
 487                rte_pktmbuf_free_seg(temp);
 488                rte_pktmbuf_free_seg(first_seg);
 489                return 0;
 490        }
 491
 492        ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
 493        mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
 494
 495        rte_pktmbuf_free(mbuf);
 496
 497        return 0;
 498}
 499
 500/* Specific for LS1043 */
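     /*
      * Rx callback with software prefetching: while frame i is converted to
      * an mbuf, the annotation area of frame i + 1 is prefetched (the
      * rte_prefetch0() calls below) so it is already warm in cache by the
      * time it is parsed.
      */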
 501void
 502dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
 503           void **bufs, int num_bufs)
 504{
 505        struct rte_mbuf *mbuf;
 506        struct dpaa_bp_info *bp_info;
 507        const struct qm_fd *fd;
 508        void *ptr;
 509        struct dpaa_if *dpaa_intf;
 510        uint16_t offset, i;
 511        uint32_t length;
 512        uint8_t format;
 513
 514        bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
 515        ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
 516        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
 517        bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
 518
 519        for (i = 0; i < num_bufs; i++) {
 520                if (i < num_bufs - 1) {
 521                        bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
 522                        ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
 523                        rte_prefetch0((void *)((uint8_t *)ptr +
 524                                        DEFAULT_RX_ICEOF));
 525                        bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
 526                                        bp_info->meta_data_size);
 527                }
 528
 529                fd = &dqrr[i]->fd;
 530                dpaa_intf = fq[0]->dpaa_intf;
 531                format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
 532                                DPAA_FD_FORMAT_SHIFT;
 533                if (unlikely(format == qm_fd_sg)) {
 534                        bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
 535                        continue;
 536                }
 537
 538                offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
 539                                DPAA_FD_OFFSET_SHIFT;
 540                length = fd->opaque & DPAA_FD_LENGTH_MASK;
 541
 542                mbuf = bufs[i];
 543                mbuf->data_off = offset;
 544                mbuf->data_len = length;
 545                mbuf->pkt_len = length;
 546                mbuf->port = dpaa_intf->ifid;
 547
 548                mbuf->nb_segs = 1;
 549                mbuf->ol_flags = 0;
 550                mbuf->next = NULL;
 551                rte_mbuf_refcnt_set(mbuf, 1);
 552                dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 553                dpaa_display_frame_info(fd, fq[0]->fqid, true);
 554        }
 555}
 556
 557void
 558dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
 559           void **bufs, int num_bufs)
 560{
 561        struct rte_mbuf *mbuf;
 562        const struct qm_fd *fd;
 563        struct dpaa_if *dpaa_intf;
 564        uint16_t offset, i;
 565        uint32_t length;
 566        uint8_t format;
 567
 568        for (i = 0; i < num_bufs; i++) {
 569                fd = &dqrr[i]->fd;
 570                dpaa_intf = fq[0]->dpaa_intf;
 571                format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
 572                                DPAA_FD_FORMAT_SHIFT;
 573                if (unlikely(format == qm_fd_sg)) {
 574                        bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
 575                        continue;
 576                }
 577
 578                offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
 579                                DPAA_FD_OFFSET_SHIFT;
 580                length = fd->opaque & DPAA_FD_LENGTH_MASK;
 581
 582                mbuf = bufs[i];
 583                mbuf->data_off = offset;
 584                mbuf->data_len = length;
 585                mbuf->pkt_len = length;
 586                mbuf->port = dpaa_intf->ifid;
 587
 588                mbuf->nb_segs = 1;
 589                mbuf->ol_flags = 0;
 590                mbuf->next = NULL;
 591                rte_mbuf_refcnt_set(mbuf, 1);
 592                dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 593                dpaa_display_frame_info(fd, fq[0]->fqid, true);
 594        }
 595}
 596
 597void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
 598{
 599        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
 600        void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
 601
  602        /* On LS1046, annotation stashing is disabled because the L2 cache
  603         * becomes a bottleneck in multicore scenarios on this platform.
  604         * So we prefetch the annotation beforehand, so that it is already
  605         * in cache when accessed.
  606         */
 607        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
 608
 609        *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
 610}
 611
 612static uint16_t
 613dpaa_eth_queue_portal_rx(struct qman_fq *fq,
 614                         struct rte_mbuf **bufs,
 615                         uint16_t nb_bufs)
 616{
 617        int ret;
 618
 619        if (unlikely(!fq->qp_initialized)) {
 620                ret = rte_dpaa_portal_fq_init((void *)0, fq);
 621                if (ret) {
 622                        DPAA_PMD_ERR("Failure in affining portal %d", ret);
 623                        return 0;
 624                }
 625                fq->qp_initialized = 1;
 626        }
 627
 628        return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
 629}
 630
 631enum qman_cb_dqrr_result
 632dpaa_rx_cb_parallel(void *event,
 633                    struct qman_portal *qm __always_unused,
 634                    struct qman_fq *fq,
 635                    const struct qm_dqrr_entry *dqrr,
 636                    void **bufs)
 637{
 638        u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
 639        struct rte_mbuf *mbuf;
 640        struct rte_event *ev = (struct rte_event *)event;
 641
 642        mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
 643        ev->event_ptr = (void *)mbuf;
 644        ev->flow_id = fq->ev.flow_id;
 645        ev->sub_event_type = fq->ev.sub_event_type;
 646        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
 647        ev->op = RTE_EVENT_OP_NEW;
 648        ev->sched_type = fq->ev.sched_type;
 649        ev->queue_id = fq->ev.queue_id;
 650        ev->priority = fq->ev.priority;
 651        ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
 652        *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
 653        *bufs = mbuf;
 654
 655        return qman_cb_dqrr_consume;
 656}
 657
 658enum qman_cb_dqrr_result
 659dpaa_rx_cb_atomic(void *event,
 660                  struct qman_portal *qm __always_unused,
 661                  struct qman_fq *fq,
 662                  const struct qm_dqrr_entry *dqrr,
 663                  void **bufs)
 664{
 665        u8 index;
 666        u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
 667        struct rte_mbuf *mbuf;
 668        struct rte_event *ev = (struct rte_event *)event;
 669
 670        mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
 671        ev->event_ptr = (void *)mbuf;
 672        ev->flow_id = fq->ev.flow_id;
 673        ev->sub_event_type = fq->ev.sub_event_type;
 674        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
 675        ev->op = RTE_EVENT_OP_NEW;
 676        ev->sched_type = fq->ev.sched_type;
 677        ev->queue_id = fq->ev.queue_id;
 678        ev->priority = fq->ev.priority;
 679
 680        /* Save active dqrr entries */
 681        index = DQRR_PTR2IDX(dqrr);
 682        DPAA_PER_LCORE_DQRR_SIZE++;
 683        DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
 684        DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
 685        ev->impl_opaque = index + 1;
 686        *dpaa_seqn(mbuf) = (uint32_t)index + 1;
 687        *bufs = mbuf;
 688
 689        return qman_cb_dqrr_defer;
 690}
 691
 692#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
 693static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf)
 694{
 695        struct rte_mbuf *mbuf;
 696        struct qman_fq *debug_fq;
 697        int ret, i;
 698        struct qm_dqrr_entry *dq;
 699        struct qm_fd *fd;
 700
 701        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
 702                ret = rte_dpaa_portal_init((void *)0);
 703                if (ret) {
 704                        DPAA_PMD_ERR("Failure in affining portal");
 705                        return;
 706                }
 707        }
 708        for (i = 0; i <= DPAA_DEBUG_FQ_TX_ERROR; i++) {
 709                debug_fq = &dpaa_intf->debug_queues[i];
 710                ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
 711                if (ret)
 712                        return;
 713
 714                do {
 715                        dq = qman_dequeue(debug_fq);
 716                        if (!dq)
 717                                continue;
 718                        fd = &dq->fd;
 719                        if (i == DPAA_DEBUG_FQ_RX_ERROR)
 720                                DPAA_PMD_ERR("RX ERROR status: 0x%08x",
 721                                        fd->status);
 722                        else
 723                                DPAA_PMD_ERR("TX ERROR status: 0x%08x",
 724                                        fd->status);
 725                        dpaa_display_frame_info(fd, debug_fq->fqid,
 726                                i == DPAA_DEBUG_FQ_RX_ERROR);
 727
 728                        mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid);
 729                        rte_pktmbuf_free(mbuf);
 730                        qman_dqrr_consume(debug_fq, dq);
 731                } while (debug_fq->flags & QMAN_FQ_STATE_VDQCR);
 732        }
 733}
 734#endif
 735
 736uint16_t dpaa_eth_queue_rx(void *q,
 737                           struct rte_mbuf **bufs,
 738                           uint16_t nb_bufs)
 739{
 740        struct qman_fq *fq = q;
 741        struct qm_dqrr_entry *dq;
 742        uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
 743        int num_rx_bufs, ret;
 744        uint32_t vdqcr_flags = 0;
 745
 746        if (unlikely(rte_dpaa_bpid_info == NULL &&
 747                                rte_eal_process_type() == RTE_PROC_SECONDARY))
 748                rte_dpaa_bpid_info = fq->bp_array;
 749
 750#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
 751        if (fq->fqid == ((struct dpaa_if *)fq->dpaa_intf)->rx_queues[0].fqid)
 752                dpaa_eth_err_queue((struct dpaa_if *)fq->dpaa_intf);
 753#endif
 754
 755        if (likely(fq->is_static))
 756                return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
 757
 758        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 759                ret = rte_dpaa_portal_init((void *)0);
 760                if (ret) {
 761                        DPAA_PMD_ERR("Failure in affining portal");
 762                        return 0;
 763                }
 764        }
 765
  766        /* For requests of fewer than four buffers, we dequeue the exact
  767         * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
  768         * flag; without it the dequeue can return up to two more buffers
  769         * than requested, so we request two fewer in that case.
  770         */
 771        if (nb_bufs < 4) {
 772                vdqcr_flags = QM_VDQCR_EXACT;
 773                num_rx_bufs = nb_bufs;
 774        } else {
 775                num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
 776                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
 777        }
 778        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
 779        if (ret)
 780                return 0;
 781
 782        do {
 783                dq = qman_dequeue(fq);
 784                if (!dq)
 785                        continue;
 786                bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
 787                dpaa_display_frame_info(&dq->fd, fq->fqid, true);
 788                qman_dqrr_consume(fq, dq);
 789        } while (fq->flags & QMAN_FQ_STATE_VDQCR);
 790
 791        return num_rx;
 792}
 793
 794int
 795dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 796                struct qm_fd *fd,
 797                uint32_t bpid)
 798{
 799        struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
 800        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
 801        struct rte_mbuf *temp, *mi;
 802        struct qm_sg_entry *sg_temp, *sgt;
 803        int i = 0;
 804
 805        DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
 806
 807        temp = rte_pktmbuf_alloc(bp_info->mp);
 808        if (!temp) {
 809                DPAA_PMD_ERR("Failure in allocation of mbuf");
 810                return -1;
 811        }
 812        if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
 813                                + temp->data_off)) {
  814                DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
                     rte_pktmbuf_free(temp);
  815                return -1;
 816        }
 817
 818        fd->cmd = 0;
 819        fd->opaque_addr = 0;
 820
 821        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
 822                if (!mbuf->packet_type) {
 823                        struct rte_net_hdr_lens hdr_lens;
 824
 825                        mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
 826                                        RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
 827                                        | RTE_PTYPE_L4_MASK);
 828                        mbuf->l2_len = hdr_lens.l2_len;
 829                        mbuf->l3_len = hdr_lens.l3_len;
 830                }
 831                if (temp->data_off < DEFAULT_TX_ICEOF
 832                        + sizeof(struct dpaa_eth_parse_results_t))
 833                        temp->data_off = DEFAULT_TX_ICEOF
 834                                + sizeof(struct dpaa_eth_parse_results_t);
 835                dcbz_64(temp->buf_addr);
 836                dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
 837        }
 838
 839        sgt = temp->buf_addr + temp->data_off;
 840        fd->format = QM_FD_SG;
 841        fd->addr = temp->buf_iova;
 842        fd->offset = temp->data_off;
 843        fd->bpid = bpid;
 844        fd->length20 = mbuf->pkt_len;
 845
 846        while (i < DPAA_SGT_MAX_ENTRIES) {
 847                sg_temp = &sgt[i++];
 848                sg_temp->opaque = 0;
 849                sg_temp->val = 0;
 850                sg_temp->addr = cur_seg->buf_iova;
 851                sg_temp->offset = cur_seg->data_off;
 852                sg_temp->length = cur_seg->data_len;
 853                if (RTE_MBUF_DIRECT(cur_seg)) {
 854                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
  855                                /* If refcnt > 1, an invalid bpid is set
  856                                 * so the buffer is not freed by HW.
  857                                 */
 858                                sg_temp->bpid = 0xff;
 859                                rte_mbuf_refcnt_update(cur_seg, -1);
 860                        } else {
 861                                sg_temp->bpid =
 862                                        DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
 863                        }
 864                        cur_seg = cur_seg->next;
 865                } else {
 866                        /* Get owner MBUF from indirect buffer */
 867                        mi = rte_mbuf_from_indirect(cur_seg);
 868                        if (rte_mbuf_refcnt_read(mi) > 1) {
  869                                /* If refcnt > 1, an invalid bpid is set
  870                                 * so the owner buffer is not freed by HW.
  871                                 */
 872                                sg_temp->bpid = 0xff;
 873                        } else {
 874                                sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
 875                                rte_mbuf_refcnt_update(mi, 1);
 876                        }
 877                        prev_seg = cur_seg;
 878                        cur_seg = cur_seg->next;
 879                        prev_seg->next = NULL;
 880                        rte_pktmbuf_free(prev_seg);
 881                }
 882                if (cur_seg == NULL) {
 883                        sg_temp->final = 1;
 884                        cpu_to_hw_sg(sg_temp);
 885                        break;
 886                }
 887                cpu_to_hw_sg(sg_temp);
 888        }
 889        return 0;
 890}
 891
 892/* Handle mbufs which are not segmented (non SG) */
 893static inline void
 894tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 895                            struct dpaa_bp_info *bp_info,
 896                            struct qm_fd *fd_arr)
 897{
 898        struct rte_mbuf *mi = NULL;
 899
 900        if (RTE_MBUF_DIRECT(mbuf)) {
 901                if (rte_mbuf_refcnt_read(mbuf) > 1) {
 902                        /* In case of direct mbuf and mbuf being cloned,
 903                         * BMAN should _not_ release buffer.
 904                         */
 905                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
  906                        /* Buffer will be released by the EAL */
 907                        rte_mbuf_refcnt_update(mbuf, -1);
 908                } else {
 909                        /* In case of direct mbuf and no cloning, mbuf can be
 910                         * released by BMAN.
 911                         */
 912                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
 913                }
 914        } else {
 915                /* This is data-containing core mbuf: 'mi' */
 916                mi = rte_mbuf_from_indirect(mbuf);
 917                if (rte_mbuf_refcnt_read(mi) > 1) {
 918                        /* In case of indirect mbuf, and mbuf being cloned,
 919                         * BMAN should _not_ release it and let EAL release
 920                         * it through pktmbuf_free below.
 921                         */
 922                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
 923                } else {
 924                        /* In case of indirect mbuf, and no cloning, core mbuf
 925                         * should be released by BMAN.
  926                         * Increase refcnt of core mbuf so that when
 927                         * pktmbuf_free is called and mbuf is released, EAL
 928                         * doesn't try to release core mbuf which would have
 929                         * been released by BMAN.
 930                         */
 931                        rte_mbuf_refcnt_update(mi, 1);
 932                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
 933                }
 934                rte_pktmbuf_free(mbuf);
 935        }
 936
 937        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
 938                dpaa_unsegmented_checksum(mbuf, fd_arr);
 939}
 940
 941/* Handle all mbufs on dpaa BMAN managed pool */
 942static inline uint16_t
 943tx_on_dpaa_pool(struct rte_mbuf *mbuf,
 944                struct dpaa_bp_info *bp_info,
 945                struct qm_fd *fd_arr)
 946{
 947        DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
 948
 949        if (mbuf->nb_segs == 1) {
 950                /* Case for non-segmented buffers */
 951                tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
 952        } else if (mbuf->nb_segs > 1 &&
 953                   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
 954                if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
 955                        DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
 956                        return 1;
 957                }
 958        } else {
 959                DPAA_PMD_DEBUG("Number of Segments not supported");
 960                return 1;
 961        }
 962
 963        return 0;
 964}
 965
 966/* Handle all mbufs on an external pool (non-dpaa) */
 967static inline struct rte_mbuf *
 968reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
 969{
 970        struct dpaa_if *dpaa_intf = txq->dpaa_intf;
 971        struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
 972        struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
 973        struct rte_mbuf *temp_mbuf;
 974        int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
 975        uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
 976        char *data;
 977
 978        DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");
 979
 980        mbufs_size = bp_info->size -
 981                bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
 982        extra_seg = !!(mbuf->pkt_len % mbufs_size);
 983        num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;
 984
 985        ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
 986        if (ret != 0) {
 987                DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
 988                return NULL;
 989        }
 990
 991        temp_mbuf = mbuf;
 992
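             /*
              * Copy-loop bookkeeping (as used below): offset1 counts the bytes
              * already consumed from the current source segment (temp_mbuf),
              * offset2 the bytes already written into the current destination
              * segment; crossing a segment boundary on either side resets the
              * corresponding offset and advances that side.
              */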
 993        while (temp_mbuf) {
 994                /* If mbuf data is less than new mbuf remaining memory */
 995                if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
 996                        bytes_to_copy = temp_mbuf->data_len - offset1;
 997                        mbuf_greater = -1;
 998                /* If mbuf data is greater than new mbuf remaining memory */
 999                } else if ((temp_mbuf->data_len - offset1) >
1000                           (mbufs_size - offset2)) {
1001                        bytes_to_copy = mbufs_size - offset2;
1002                        mbuf_greater = 1;
1003                /* if mbuf data is equal to new mbuf remaining memory */
1004                } else {
1005                        bytes_to_copy = temp_mbuf->data_len - offset1;
1006                        mbuf_greater = 0;
1007                }
1008
1009                /* Copy the data */
1010                data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);
1011
 1012                rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf,
 1013                           void *, offset1), bytes_to_copy);
1014
1015                /* Set new offsets and the temp buffers */
1016                if (mbuf_greater == -1) {
1017                        offset1 = 0;
1018                        offset2 += bytes_to_copy;
1019                        temp_mbuf = temp_mbuf->next;
1020                } else if (mbuf_greater == 1) {
1021                        offset2 = 0;
1022                        offset1 += bytes_to_copy;
1023                        new_mbufs[i]->next = new_mbufs[i + 1];
1024                        new_mbufs[0]->nb_segs++;
1025                        i++;
1026                } else {
1027                        offset1 = 0;
1028                        offset2 = 0;
1029                        temp_mbuf = temp_mbuf->next;
1030                        new_mbufs[i]->next = new_mbufs[i + 1];
1031                        if (new_mbufs[i + 1])
1032                                new_mbufs[0]->nb_segs++;
1033                        i++;
1034                }
1035        }
1036
1037        /* Copy other required fields */
1038        new_mbufs[0]->ol_flags = mbuf->ol_flags;
1039        new_mbufs[0]->packet_type = mbuf->packet_type;
1040        new_mbufs[0]->tx_offload = mbuf->tx_offload;
1041
1042        rte_pktmbuf_free(mbuf);
1043
1044        return new_mbufs[0];
1045}
1046
1047uint16_t
1048dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
1049{
1050        struct rte_mbuf *mbuf, *mi = NULL;
1051        struct rte_mempool *mp;
1052        struct dpaa_bp_info *bp_info;
1053        struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
1054        uint32_t frames_to_send, loop, sent = 0;
1055        uint16_t state;
1056        int ret, realloc_mbuf = 0;
1057        uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
1058
1059        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1060                ret = rte_dpaa_portal_init((void *)0);
1061                if (ret) {
1062                        DPAA_PMD_ERR("Failure in affining portal");
1063                        return 0;
1064                }
1065        }
1066
1067        DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
1068
1069        while (nb_bufs) {
1070                frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
1071                                DPAA_TX_BURST_SIZE : nb_bufs;
1072                for (loop = 0; loop < frames_to_send; loop++) {
1073                        mbuf = *(bufs++);
 1074                        /* If the data offset is not 128-byte aligned (0x7F
 1075                         * mask below), FMAN can stall because of an errata
 1076                         * on LS1043A, so reallocate the buffer in that case.
 1077                         */
1078                        if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
1079                                        (mbuf->data_off & 0x7F) != 0x0)
1080                                realloc_mbuf = 1;
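                             /*
                              * For mbufs delivered via an atomic event queue,
                              * dpaa_rx_cb_atomic() stored the held DQRR index
                              * (+1) in the mbuf seqn. Encode that index in the
                              * enqueue flags so this enqueue consumes the DQRR
                              * entry via DCA, and drop it from the per-lcore
                              * held set.
                              */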
1081                        seqn = *dpaa_seqn(mbuf);
1082                        if (seqn != DPAA_INVALID_MBUF_SEQN) {
1083                                index = seqn - 1;
1084                                if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1085                                        flags[loop] =
1086                                           ((index & QM_EQCR_DCA_IDXMASK) << 8);
1087                                        flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1088                                        DPAA_PER_LCORE_DQRR_SIZE--;
1089                                        DPAA_PER_LCORE_DQRR_HELD &=
1090                                                                ~(1 << index);
1091                                }
1092                        }
1093
1094                        if (likely(RTE_MBUF_DIRECT(mbuf))) {
1095                                mp = mbuf->pool;
1096                                bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
1097                                if (likely(mp->ops_index ==
1098                                                bp_info->dpaa_ops_index &&
1099                                        mbuf->nb_segs == 1 &&
1100                                        realloc_mbuf == 0 &&
1101                                        rte_mbuf_refcnt_read(mbuf) == 1)) {
1102                                        DPAA_MBUF_TO_CONTIG_FD(mbuf,
1103                                                &fd_arr[loop], bp_info->bpid);
1104                                        if (mbuf->ol_flags &
1105                                                DPAA_TX_CKSUM_OFFLOAD_MASK)
1106                                                dpaa_unsegmented_checksum(mbuf,
1107                                                        &fd_arr[loop]);
1108                                        continue;
1109                                }
1110                        } else {
1111                                mi = rte_mbuf_from_indirect(mbuf);
1112                                mp = mi->pool;
1113                        }
1114
1115                        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
1116                        if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
1117                                     realloc_mbuf == 1)) {
1118                                struct rte_mbuf *temp_mbuf;
1119
1120                                temp_mbuf = reallocate_mbuf(q, mbuf);
1121                                if (!temp_mbuf) {
 1122                                        /* Set frames_to_send & nb_bufs so
 1123                                         * that only the frames prepared so
 1124                                         * far are transmitted.
 1125                                         */
1126                                        frames_to_send = loop;
1127                                        nb_bufs = loop;
1128                                        goto send_pkts;
1129                                }
1130                                mbuf = temp_mbuf;
1131                                realloc_mbuf = 0;
1132                        }
1133
1134                        state = tx_on_dpaa_pool(mbuf, bp_info,
1135                                                &fd_arr[loop]);
1136                        if (unlikely(state)) {
 1137                                /* Set frames_to_send & nb_bufs so
 1138                                 * that only the frames prepared so
 1139                                 * far are transmitted.
 1140                                 */
1141                                frames_to_send = loop;
1142                                nb_bufs = loop;
1143                                goto send_pkts;
1144                        }
1145                }
1146
1147send_pkts:
1148                loop = 0;
1149                while (loop < frames_to_send) {
1150                        loop += qman_enqueue_multi(q, &fd_arr[loop],
1151                                                   &flags[loop],
1152                                                   frames_to_send - loop);
1153                }
1154                nb_bufs -= frames_to_send;
1155                sent += frames_to_send;
1156        }
1157
1158        DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
1159
1160        return sent;
1161}
1162
1163uint16_t
1164dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
1165{
1166        qman_ern_poll_free();
1167
1168        return dpaa_eth_queue_tx(q, bufs, nb_bufs);
1169}
1170
1171uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
1172                              struct rte_mbuf **bufs __rte_unused,
1173                uint16_t nb_bufs __rte_unused)
1174{
1175        DPAA_DP_LOG(DEBUG, "Drop all packets");
1176
 1177        /* Drop all incoming packets. No need to free them here: the
 1178         * rte_eth framework frees packets through the tx_buffer callback
 1179         * when this function returns a count less than nb_bufs.
 1180         */
1181        return 0;
1182}
1183