linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/* Copyright 2014-2016 Freescale Semiconductor Inc.
   3 * Copyright 2016-2020 NXP
   4 */
   5#include <linux/init.h>
   6#include <linux/module.h>
   7#include <linux/platform_device.h>
   8#include <linux/etherdevice.h>
   9#include <linux/of_net.h>
  10#include <linux/interrupt.h>
  11#include <linux/msi.h>
  12#include <linux/kthread.h>
  13#include <linux/iommu.h>
  14#include <linux/fsl/mc.h>
  15#include <linux/bpf.h>
  16#include <linux/bpf_trace.h>
  17#include <linux/fsl/ptp_qoriq.h>
  18#include <linux/ptp_classify.h>
  19#include <net/pkt_cls.h>
  20#include <net/sock.h>
  21
  22#include "dpaa2-eth.h"
  23
   24/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
   25 * using these trace events only need to #include "dpaa2-eth-trace.h"
   26 */
  27#define CREATE_TRACE_POINTS
  28#include "dpaa2-eth-trace.h"
  29
  30MODULE_LICENSE("Dual BSD/GPL");
  31MODULE_AUTHOR("Freescale Semiconductor, Inc");
  32MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
  33
  34struct ptp_qoriq *dpaa2_ptp;
  35EXPORT_SYMBOL(dpaa2_ptp);
  36
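     /* Translate a DMA (IOVA) address into a CPU virtual address. When an
      * IOMMU domain is attached, the IOVA is first resolved to its backing
      * physical address; otherwise it already is a physical address.
      */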
  37static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
  38                                dma_addr_t iova_addr)
  39{
  40        phys_addr_t phys_addr;
  41
  42        phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
  43
  44        return phys_to_virt(phys_addr);
  45}
  46
  47static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
  48                                       u32 fd_status,
  49                                       struct sk_buff *skb)
  50{
  51        skb_checksum_none_assert(skb);
  52
  53        /* HW checksum validation is disabled, nothing to do here */
  54        if (!(priv->net_dev->features & NETIF_F_RXCSUM))
  55                return;
  56
  57        /* Read checksum validation bits */
  58        if (!((fd_status & DPAA2_FAS_L3CV) &&
  59              (fd_status & DPAA2_FAS_L4CV)))
  60                return;
  61
  62        /* Inform the stack there's no need to compute L3/L4 csum anymore */
  63        skb->ip_summed = CHECKSUM_UNNECESSARY;
  64}
  65
  66/* Free a received FD.
  67 * Not to be used for Tx conf FDs or on any other paths.
  68 */
  69static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
  70                                 const struct dpaa2_fd *fd,
  71                                 void *vaddr)
  72{
  73        struct device *dev = priv->net_dev->dev.parent;
  74        dma_addr_t addr = dpaa2_fd_get_addr(fd);
  75        u8 fd_format = dpaa2_fd_get_format(fd);
  76        struct dpaa2_sg_entry *sgt;
  77        void *sg_vaddr;
  78        int i;
  79
  80        /* If single buffer frame, just free the data buffer */
  81        if (fd_format == dpaa2_fd_single)
  82                goto free_buf;
  83        else if (fd_format != dpaa2_fd_sg)
  84                /* We don't support any other format */
  85                return;
  86
  87        /* For S/G frames, we first need to free all SG entries
  88         * except the first one, which was taken care of already
  89         */
  90        sgt = vaddr + dpaa2_fd_get_offset(fd);
  91        for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
  92                addr = dpaa2_sg_get_addr(&sgt[i]);
  93                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
  94                dma_unmap_page(dev, addr, priv->rx_buf_size,
  95                               DMA_BIDIRECTIONAL);
  96
  97                free_pages((unsigned long)sg_vaddr, 0);
  98                if (dpaa2_sg_is_final(&sgt[i]))
  99                        break;
 100        }
 101
 102free_buf:
 103        free_pages((unsigned long)vaddr, 0);
 104}
 105
 106/* Build a linear skb based on a single-buffer frame descriptor */
 107static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
 108                                                  const struct dpaa2_fd *fd,
 109                                                  void *fd_vaddr)
 110{
 111        struct sk_buff *skb = NULL;
 112        u16 fd_offset = dpaa2_fd_get_offset(fd);
 113        u32 fd_length = dpaa2_fd_get_len(fd);
 114
 115        ch->buf_count--;
 116
 117        skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 118        if (unlikely(!skb))
 119                return NULL;
 120
 121        skb_reserve(skb, fd_offset);
 122        skb_put(skb, fd_length);
 123
 124        return skb;
 125}
 126
  127/* Build a non-linear (fragmented) skb based on an S/G table */
 128static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
 129                                                struct dpaa2_eth_channel *ch,
 130                                                struct dpaa2_sg_entry *sgt)
 131{
 132        struct sk_buff *skb = NULL;
 133        struct device *dev = priv->net_dev->dev.parent;
 134        void *sg_vaddr;
 135        dma_addr_t sg_addr;
 136        u16 sg_offset;
 137        u32 sg_length;
 138        struct page *page, *head_page;
 139        int page_offset;
 140        int i;
 141
 142        for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 143                struct dpaa2_sg_entry *sge = &sgt[i];
 144
 145                /* NOTE: We only support SG entries in dpaa2_sg_single format,
 146                 * but this is the only format we may receive from HW anyway
 147                 */
 148
 149                /* Get the address and length from the S/G entry */
 150                sg_addr = dpaa2_sg_get_addr(sge);
 151                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
 152                dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
 153                               DMA_BIDIRECTIONAL);
 154
 155                sg_length = dpaa2_sg_get_len(sge);
 156
 157                if (i == 0) {
 158                        /* We build the skb around the first data buffer */
 159                        skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 160                        if (unlikely(!skb)) {
 161                                /* Free the first SG entry now, since we already
 162                                 * unmapped it and obtained the virtual address
 163                                 */
 164                                free_pages((unsigned long)sg_vaddr, 0);
 165
 166                                /* We still need to subtract the buffers used
 167                                 * by this FD from our software counter
 168                                 */
 169                                while (!dpaa2_sg_is_final(&sgt[i]) &&
 170                                       i < DPAA2_ETH_MAX_SG_ENTRIES)
 171                                        i++;
 172                                break;
 173                        }
 174
 175                        sg_offset = dpaa2_sg_get_offset(sge);
 176                        skb_reserve(skb, sg_offset);
 177                        skb_put(skb, sg_length);
 178                } else {
 179                        /* Rest of the data buffers are stored as skb frags */
 180                        page = virt_to_page(sg_vaddr);
 181                        head_page = virt_to_head_page(sg_vaddr);
 182
 183                        /* Offset in page (which may be compound).
 184                         * Data in subsequent SG entries is stored from the
 185                         * beginning of the buffer, so we don't need to add the
 186                         * sg_offset.
 187                         */
 188                        page_offset = ((unsigned long)sg_vaddr &
 189                                (PAGE_SIZE - 1)) +
 190                                (page_address(page) - page_address(head_page));
 191
 192                        skb_add_rx_frag(skb, i - 1, head_page, page_offset,
 193                                        sg_length, priv->rx_buf_size);
 194                }
 195
 196                if (dpaa2_sg_is_final(sge))
 197                        break;
 198        }
 199
 200        WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
 201
 202        /* Count all data buffers + SG table buffer */
 203        ch->buf_count -= i + 2;
 204
 205        return skb;
 206}
 207
 208/* Free buffers acquired from the buffer pool or which were meant to
 209 * be released in the pool
 210 */
 211static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
 212                                int count)
 213{
 214        struct device *dev = priv->net_dev->dev.parent;
 215        void *vaddr;
 216        int i;
 217
 218        for (i = 0; i < count; i++) {
 219                vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
 220                dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
 221                               DMA_BIDIRECTIONAL);
 222                free_pages((unsigned long)vaddr, 0);
 223        }
 224}
 225
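     /* Queue a buffer for release back into the buffer pool. Buffers are
      * released to hardware in batches of DPAA2_ETH_BUFS_PER_CMD; if the
      * release command still fails after retrying, free the buffers so they
      * are not leaked.
      */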
 226static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
 227                                  struct dpaa2_eth_channel *ch,
 228                                  dma_addr_t addr)
 229{
 230        int retries = 0;
 231        int err;
 232
 233        ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
 234        if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
 235                return;
 236
 237        while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
 238                                               ch->recycled_bufs,
 239                                               ch->recycled_bufs_cnt)) == -EBUSY) {
 240                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
 241                        break;
 242                cpu_relax();
 243        }
 244
 245        if (err) {
 246                dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
 247                ch->buf_count -= ch->recycled_bufs_cnt;
 248        }
 249
 250        ch->recycled_bufs_cnt = 0;
 251}
 252
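     /* Enqueue a batch of frame descriptors onto the given FQ, retrying a
      * bounded number of times while the portal is busy. Returns the number
      * of FDs actually accepted by hardware.
      */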
 253static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
 254                               struct dpaa2_eth_fq *fq,
 255                               struct dpaa2_eth_xdp_fds *xdp_fds)
 256{
 257        int total_enqueued = 0, retries = 0, enqueued;
 258        struct dpaa2_eth_drv_stats *percpu_extras;
 259        int num_fds, err, max_retries;
 260        struct dpaa2_fd *fds;
 261
 262        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 263
 264        /* try to enqueue all the FDs until the max number of retries is hit */
 265        fds = xdp_fds->fds;
 266        num_fds = xdp_fds->num;
 267        max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
 268        while (total_enqueued < num_fds && retries < max_retries) {
 269                err = priv->enqueue(priv, fq, &fds[total_enqueued],
 270                                    0, num_fds - total_enqueued, &enqueued);
 271                if (err == -EBUSY) {
 272                        percpu_extras->tx_portal_busy += ++retries;
 273                        continue;
 274                }
 275                total_enqueued += enqueued;
 276        }
 277        xdp_fds->num = 0;
 278
 279        return total_enqueued;
 280}
 281
 282static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
 283                                   struct dpaa2_eth_channel *ch,
 284                                   struct dpaa2_eth_fq *fq)
 285{
 286        struct rtnl_link_stats64 *percpu_stats;
 287        struct dpaa2_fd *fds;
 288        int enqueued, i;
 289
 290        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 291
  292        /* Enqueue the array of XDP_TX frames */
 293        enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
 294
 295        /* update statistics */
 296        percpu_stats->tx_packets += enqueued;
 297        fds = fq->xdp_tx_fds.fds;
 298        for (i = 0; i < enqueued; i++) {
 299                percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
 300                ch->stats.xdp_tx++;
 301        }
 302        for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
 303                dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
 304                percpu_stats->tx_errors++;
 305                ch->stats.xdp_tx_err++;
 306        }
 307        fq->xdp_tx_fds.num = 0;
 308}
 309
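     /* Prepare an XDP_TX frame for transmission and add it to the per-queue
      * batch; the batch is flushed to hardware once it reaches
      * DEV_MAP_BULK_SIZE entries.
      */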
 310static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
 311                                  struct dpaa2_eth_channel *ch,
 312                                  struct dpaa2_fd *fd,
 313                                  void *buf_start, u16 queue_id)
 314{
 315        struct dpaa2_faead *faead;
 316        struct dpaa2_fd *dest_fd;
 317        struct dpaa2_eth_fq *fq;
 318        u32 ctrl, frc;
 319
 320        /* Mark the egress frame hardware annotation area as valid */
 321        frc = dpaa2_fd_get_frc(fd);
 322        dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
 323        dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
 324
 325        /* Instruct hardware to release the FD buffer directly into
 326         * the buffer pool once transmission is completed, instead of
 327         * sending a Tx confirmation frame to us
 328         */
 329        ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
 330        faead = dpaa2_get_faead(buf_start, false);
 331        faead->ctrl = cpu_to_le32(ctrl);
 332        faead->conf_fqid = 0;
 333
 334        fq = &priv->fq[queue_id];
 335        dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
 336        memcpy(dest_fd, fd, sizeof(*dest_fd));
 337
 338        if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
 339                return;
 340
 341        dpaa2_eth_xdp_tx_flush(priv, ch, fq);
 342}
 343
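     /* Run the attached XDP program, if any, on a received frame and act on
      * its verdict (PASS, TX, REDIRECT, DROP/ABORTED). Returns the XDP
      * action so the caller knows whether to keep building an skb.
      */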
 344static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
 345                             struct dpaa2_eth_channel *ch,
 346                             struct dpaa2_eth_fq *rx_fq,
 347                             struct dpaa2_fd *fd, void *vaddr)
 348{
 349        dma_addr_t addr = dpaa2_fd_get_addr(fd);
 350        struct bpf_prog *xdp_prog;
 351        struct xdp_buff xdp;
 352        u32 xdp_act = XDP_PASS;
 353        int err, offset;
 354
 355        xdp_prog = READ_ONCE(ch->xdp.prog);
 356        if (!xdp_prog)
 357                goto out;
 358
 359        offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
 360        xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
 361        xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
 362                         dpaa2_fd_get_len(fd), false);
 363
 364        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 365
 366        /* xdp.data pointer may have changed */
 367        dpaa2_fd_set_offset(fd, xdp.data - vaddr);
 368        dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
 369
 370        switch (xdp_act) {
 371        case XDP_PASS:
 372                break;
 373        case XDP_TX:
 374                dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
 375                break;
 376        default:
 377                bpf_warn_invalid_xdp_action(xdp_act);
 378                fallthrough;
 379        case XDP_ABORTED:
 380                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
 381                fallthrough;
 382        case XDP_DROP:
 383                dpaa2_eth_recycle_buf(priv, ch, addr);
 384                ch->stats.xdp_drop++;
 385                break;
 386        case XDP_REDIRECT:
 387                dma_unmap_page(priv->net_dev->dev.parent, addr,
 388                               priv->rx_buf_size, DMA_BIDIRECTIONAL);
 389                ch->buf_count--;
 390
 391                /* Allow redirect use of full headroom */
 392                xdp.data_hard_start = vaddr;
 393                xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
 394
 395                err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
 396                if (unlikely(err)) {
 397                        addr = dma_map_page(priv->net_dev->dev.parent,
 398                                            virt_to_page(vaddr), 0,
 399                                            priv->rx_buf_size, DMA_BIDIRECTIONAL);
 400                        if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
 401                                free_pages((unsigned long)vaddr, 0);
 402                        } else {
 403                                ch->buf_count++;
 404                                dpaa2_eth_recycle_buf(priv, ch, addr);
 405                        }
 406                        ch->stats.xdp_drop++;
 407                } else {
 408                        ch->stats.xdp_redirect++;
 409                }
 410                break;
 411        }
 412
 413        ch->xdp.res |= xdp_act;
 414out:
 415        return xdp_act;
 416}
 417
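     /* For frames no longer than the rx_copybreak threshold, copy the data
      * into a freshly allocated skb and recycle the original Rx buffer back
      * to the pool, avoiding a DMA unmap of the large buffer.
      */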
 418static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
 419                                           const struct dpaa2_fd *fd,
 420                                           void *fd_vaddr)
 421{
 422        u16 fd_offset = dpaa2_fd_get_offset(fd);
 423        struct dpaa2_eth_priv *priv = ch->priv;
 424        u32 fd_length = dpaa2_fd_get_len(fd);
 425        struct sk_buff *skb = NULL;
 426        unsigned int skb_len;
 427
 428        if (fd_length > priv->rx_copybreak)
 429                return NULL;
 430
 431        skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
 432
 433        skb = napi_alloc_skb(&ch->napi, skb_len);
 434        if (!skb)
 435                return NULL;
 436
 437        skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
 438        skb_put(skb, fd_length);
 439
 440        memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
 441
 442        dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
 443
 444        return skb;
 445}
 446
 447/* Main Rx frame processing routine */
 448static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 449                         struct dpaa2_eth_channel *ch,
 450                         const struct dpaa2_fd *fd,
 451                         struct dpaa2_eth_fq *fq)
 452{
 453        dma_addr_t addr = dpaa2_fd_get_addr(fd);
 454        u8 fd_format = dpaa2_fd_get_format(fd);
 455        void *vaddr;
 456        struct sk_buff *skb;
 457        struct rtnl_link_stats64 *percpu_stats;
 458        struct dpaa2_eth_drv_stats *percpu_extras;
 459        struct device *dev = priv->net_dev->dev.parent;
 460        struct dpaa2_fas *fas;
 461        void *buf_data;
 462        u32 status = 0;
 463        u32 xdp_act;
 464
 465        /* Tracing point */
 466        trace_dpaa2_rx_fd(priv->net_dev, fd);
 467
 468        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 469        dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
 470                                DMA_BIDIRECTIONAL);
 471
 472        fas = dpaa2_get_fas(vaddr, false);
 473        prefetch(fas);
 474        buf_data = vaddr + dpaa2_fd_get_offset(fd);
 475        prefetch(buf_data);
 476
 477        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 478        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 479
 480        if (fd_format == dpaa2_fd_single) {
 481                xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
 482                if (xdp_act != XDP_PASS) {
 483                        percpu_stats->rx_packets++;
 484                        percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 485                        return;
 486                }
 487
 488                skb = dpaa2_eth_copybreak(ch, fd, vaddr);
 489                if (!skb) {
 490                        dma_unmap_page(dev, addr, priv->rx_buf_size,
 491                                       DMA_BIDIRECTIONAL);
 492                        skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
 493                }
 494        } else if (fd_format == dpaa2_fd_sg) {
 495                WARN_ON(priv->xdp_prog);
 496
 497                dma_unmap_page(dev, addr, priv->rx_buf_size,
 498                               DMA_BIDIRECTIONAL);
 499                skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
 500                free_pages((unsigned long)vaddr, 0);
 501                percpu_extras->rx_sg_frames++;
 502                percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 503        } else {
 504                /* We don't support any other format */
 505                goto err_frame_format;
 506        }
 507
 508        if (unlikely(!skb))
 509                goto err_build_skb;
 510
 511        prefetch(skb->data);
 512
 513        /* Get the timestamp value */
 514        if (priv->rx_tstamp) {
 515                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 516                __le64 *ts = dpaa2_get_ts(vaddr, false);
 517                u64 ns;
 518
 519                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 520
 521                ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
 522                shhwtstamps->hwtstamp = ns_to_ktime(ns);
 523        }
 524
 525        /* Check if we need to validate the L4 csum */
 526        if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
 527                status = le32_to_cpu(fas->status);
 528                dpaa2_eth_validate_rx_csum(priv, status, skb);
 529        }
 530
 531        skb->protocol = eth_type_trans(skb, priv->net_dev);
 532        skb_record_rx_queue(skb, fq->flowid);
 533
 534        percpu_stats->rx_packets++;
 535        percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 536
 537        list_add_tail(&skb->list, ch->rx_list);
 538
 539        return;
 540
 541err_build_skb:
 542        dpaa2_eth_free_rx_fd(priv, fd, vaddr);
 543err_frame_format:
 544        percpu_stats->rx_dropped++;
 545}
 546
  547/* Processing of Rx frames received on the error FQ
  548 * We report the error via devlink traps (if configured), then free the frame
  549 */
 550static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
 551                             struct dpaa2_eth_channel *ch,
 552                             const struct dpaa2_fd *fd,
 553                             struct dpaa2_eth_fq *fq __always_unused)
 554{
 555        struct device *dev = priv->net_dev->dev.parent;
 556        dma_addr_t addr = dpaa2_fd_get_addr(fd);
 557        u8 fd_format = dpaa2_fd_get_format(fd);
 558        struct rtnl_link_stats64 *percpu_stats;
 559        struct dpaa2_eth_trap_item *trap_item;
 560        struct dpaa2_fapr *fapr;
 561        struct sk_buff *skb;
 562        void *buf_data;
 563        void *vaddr;
 564
 565        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 566        dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
 567                                DMA_BIDIRECTIONAL);
 568
 569        buf_data = vaddr + dpaa2_fd_get_offset(fd);
 570
 571        if (fd_format == dpaa2_fd_single) {
 572                dma_unmap_page(dev, addr, priv->rx_buf_size,
 573                               DMA_BIDIRECTIONAL);
 574                skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
 575        } else if (fd_format == dpaa2_fd_sg) {
 576                dma_unmap_page(dev, addr, priv->rx_buf_size,
 577                               DMA_BIDIRECTIONAL);
 578                skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
 579                free_pages((unsigned long)vaddr, 0);
 580        } else {
 581                /* We don't support any other format */
 582                dpaa2_eth_free_rx_fd(priv, fd, vaddr);
 583                goto err_frame_format;
 584        }
 585
 586        fapr = dpaa2_get_fapr(vaddr, false);
 587        trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
 588        if (trap_item)
 589                devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
 590                                    &priv->devlink_port, NULL);
 591        consume_skb(skb);
 592
 593err_frame_format:
 594        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 595        percpu_stats->rx_errors++;
 596        ch->buf_count--;
 597}
 598
 599/* Consume all frames pull-dequeued into the store. This is the simplest way to
 600 * make sure we don't accidentally issue another volatile dequeue which would
 601 * overwrite (leak) frames already in the store.
 602 *
 603 * Observance of NAPI budget is not our concern, leaving that to the caller.
 604 */
 605static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
 606                                    struct dpaa2_eth_fq **src)
 607{
 608        struct dpaa2_eth_priv *priv = ch->priv;
 609        struct dpaa2_eth_fq *fq = NULL;
 610        struct dpaa2_dq *dq;
 611        const struct dpaa2_fd *fd;
 612        int cleaned = 0, retries = 0;
 613        int is_last;
 614
 615        do {
 616                dq = dpaa2_io_store_next(ch->store, &is_last);
 617                if (unlikely(!dq)) {
 618                        /* If we're here, we *must* have placed a
  619                         * volatile dequeue command, so keep reading through
 620                         * the store until we get some sort of valid response
 621                         * token (either a valid frame or an "empty dequeue")
 622                         */
 623                        if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
 624                                netdev_err_once(priv->net_dev,
 625                                                "Unable to read a valid dequeue response\n");
 626                                return -ETIMEDOUT;
 627                        }
 628                        continue;
 629                }
 630
 631                fd = dpaa2_dq_fd(dq);
 632                fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
 633
 634                fq->consume(priv, ch, fd, fq);
 635                cleaned++;
 636                retries = 0;
 637        } while (!is_last);
 638
 639        if (!cleaned)
 640                return 0;
 641
 642        fq->stats.frames += cleaned;
 643        ch->stats.frames += cleaned;
 644
 645        /* A dequeue operation only pulls frames from a single queue
 646         * into the store. Return the frame queue as an out param.
 647         */
 648        if (src)
 649                *src = fq;
 650
 651        return cleaned;
 652}
 653
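     /* Parse a PTP packet: report its message type, whether it requests
      * two-step timestamping, whether it is UDP encapsulated, and the
      * offsets of the correctionField and originTimestamp fields relative
      * to the MAC header.
      */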
 654static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
 655                               u8 *msgtype, u8 *twostep, u8 *udp,
 656                               u16 *correction_offset,
 657                               u16 *origintimestamp_offset)
 658{
 659        unsigned int ptp_class;
 660        struct ptp_header *hdr;
 661        unsigned int type;
 662        u8 *base;
 663
 664        ptp_class = ptp_classify_raw(skb);
 665        if (ptp_class == PTP_CLASS_NONE)
 666                return -EINVAL;
 667
 668        hdr = ptp_parse_header(skb, ptp_class);
 669        if (!hdr)
 670                return -EINVAL;
 671
 672        *msgtype = ptp_get_msgtype(hdr, ptp_class);
 673        *twostep = hdr->flag_field[0] & 0x2;
 674
 675        type = ptp_class & PTP_CLASS_PMASK;
 676        if (type == PTP_CLASS_IPV4 ||
 677            type == PTP_CLASS_IPV6)
 678                *udp = 1;
 679        else
 680                *udp = 0;
 681
 682        base = skb_mac_header(skb);
 683        *correction_offset = (u8 *)&hdr->correction - base;
 684        *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
 685
 686        return 0;
 687}
 688
 689/* Configure the egress frame annotation for timestamp update */
 690static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
 691                                       struct dpaa2_fd *fd,
 692                                       void *buf_start,
 693                                       struct sk_buff *skb)
 694{
 695        struct ptp_tstamp origin_timestamp;
 696        struct dpni_single_step_cfg cfg;
 697        u8 msgtype, twostep, udp;
 698        struct dpaa2_faead *faead;
 699        struct dpaa2_fas *fas;
 700        struct timespec64 ts;
 701        u16 offset1, offset2;
 702        u32 ctrl, frc;
 703        __le64 *ns;
 704        u8 *data;
 705
 706        /* Mark the egress frame annotation area as valid */
 707        frc = dpaa2_fd_get_frc(fd);
 708        dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
 709
 710        /* Set hardware annotation size */
 711        ctrl = dpaa2_fd_get_ctrl(fd);
 712        dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
 713
  714        /* enable UPD (update prepended data) bit in FAEAD field of
 715         * hardware frame annotation area
 716         */
 717        ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
 718        faead = dpaa2_get_faead(buf_start, true);
 719        faead->ctrl = cpu_to_le32(ctrl);
 720
 721        if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
 722                if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
 723                                        &offset1, &offset2) ||
 724                    msgtype != PTP_MSGTYPE_SYNC || twostep) {
 725                        WARN_ONCE(1, "Bad packet for one-step timestamping\n");
 726                        return;
 727                }
 728
 729                /* Mark the frame annotation status as valid */
 730                frc = dpaa2_fd_get_frc(fd);
 731                dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
 732
 733                /* Mark the PTP flag for one step timestamping */
 734                fas = dpaa2_get_fas(buf_start, true);
 735                fas->status = cpu_to_le32(DPAA2_FAS_PTP);
 736
 737                dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
 738                ns = dpaa2_get_ts(buf_start, true);
 739                *ns = cpu_to_le64(timespec64_to_ns(&ts) /
 740                                  DPAA2_PTP_CLK_PERIOD_NS);
 741
 742                /* Update current time to PTP message originTimestamp field */
 743                ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
 744                data = skb_mac_header(skb);
 745                *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
 746                *(__be32 *)(data + offset2 + 2) =
 747                        htonl(origin_timestamp.sec_lsb);
 748                *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
 749
 750                cfg.en = 1;
 751                cfg.ch_update = udp;
 752                cfg.offset = offset1;
 753                cfg.peer_delay = 0;
 754
 755                if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
 756                                             &cfg))
 757                        WARN_ONCE(1, "Failed to set single step register");
 758        }
 759}
 760
 761/* Create a frame descriptor based on a fragmented skb */
 762static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
 763                                 struct sk_buff *skb,
 764                                 struct dpaa2_fd *fd,
 765                                 void **swa_addr)
 766{
 767        struct device *dev = priv->net_dev->dev.parent;
 768        void *sgt_buf = NULL;
 769        dma_addr_t addr;
 770        int nr_frags = skb_shinfo(skb)->nr_frags;
 771        struct dpaa2_sg_entry *sgt;
 772        int i, err;
 773        int sgt_buf_size;
 774        struct scatterlist *scl, *crt_scl;
 775        int num_sg;
 776        int num_dma_bufs;
 777        struct dpaa2_eth_swa *swa;
 778
 779        /* Create and map scatterlist.
 780         * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
 781         * to go beyond nr_frags+1.
 782         * Note: We don't support chained scatterlists
 783         */
 784        if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
 785                return -EINVAL;
 786
 787        scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
 788        if (unlikely(!scl))
 789                return -ENOMEM;
 790
 791        sg_init_table(scl, nr_frags + 1);
 792        num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
 793        if (unlikely(num_sg < 0)) {
 794                err = -ENOMEM;
 795                goto dma_map_sg_failed;
 796        }
 797        num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 798        if (unlikely(!num_dma_bufs)) {
 799                err = -ENOMEM;
 800                goto dma_map_sg_failed;
 801        }
 802
 803        /* Prepare the HW SGT structure */
 804        sgt_buf_size = priv->tx_data_offset +
  805                       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
 806        sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
 807        if (unlikely(!sgt_buf)) {
 808                err = -ENOMEM;
 809                goto sgt_buf_alloc_failed;
 810        }
 811        memset(sgt_buf, 0, sgt_buf_size);
 812
 813        sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 814
 815        /* Fill in the HW SGT structure.
 816         *
 817         * sgt_buf is zeroed out, so the following fields are implicit
 818         * in all sgt entries:
 819         *   - offset is 0
 820         *   - format is 'dpaa2_sg_single'
 821         */
 822        for_each_sg(scl, crt_scl, num_dma_bufs, i) {
 823                dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
 824                dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
 825        }
 826        dpaa2_sg_set_final(&sgt[i - 1], true);
 827
 828        /* Store the skb backpointer in the SGT buffer.
 829         * Fit the scatterlist and the number of buffers alongside the
 830         * skb backpointer in the software annotation area. We'll need
 831         * all of them on Tx Conf.
 832         */
 833        *swa_addr = (void *)sgt_buf;
 834        swa = (struct dpaa2_eth_swa *)sgt_buf;
 835        swa->type = DPAA2_ETH_SWA_SG;
 836        swa->sg.skb = skb;
 837        swa->sg.scl = scl;
 838        swa->sg.num_sg = num_sg;
 839        swa->sg.sgt_size = sgt_buf_size;
 840
 841        /* Separately map the SGT buffer */
 842        addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
 843        if (unlikely(dma_mapping_error(dev, addr))) {
 844                err = -ENOMEM;
 845                goto dma_map_single_failed;
 846        }
 847        dpaa2_fd_set_offset(fd, priv->tx_data_offset);
 848        dpaa2_fd_set_format(fd, dpaa2_fd_sg);
 849        dpaa2_fd_set_addr(fd, addr);
 850        dpaa2_fd_set_len(fd, skb->len);
 851        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
 852
 853        return 0;
 854
 855dma_map_single_failed:
 856        skb_free_frag(sgt_buf);
 857sgt_buf_alloc_failed:
 858        dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 859dma_map_sg_failed:
 860        kfree(scl);
 861        return err;
 862}
 863
 864/* Create a SG frame descriptor based on a linear skb.
 865 *
 866 * This function is used on the Tx path when the skb headroom is not large
 867 * enough for the HW requirements, thus instead of realloc-ing the skb we
 868 * create a SG frame descriptor with only one entry.
 869 */
 870static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
 871                                            struct sk_buff *skb,
 872                                            struct dpaa2_fd *fd,
 873                                            void **swa_addr)
 874{
 875        struct device *dev = priv->net_dev->dev.parent;
 876        struct dpaa2_eth_sgt_cache *sgt_cache;
 877        struct dpaa2_sg_entry *sgt;
 878        struct dpaa2_eth_swa *swa;
 879        dma_addr_t addr, sgt_addr;
 880        void *sgt_buf = NULL;
 881        int sgt_buf_size;
 882        int err;
 883
 884        /* Prepare the HW SGT structure */
 885        sgt_cache = this_cpu_ptr(priv->sgt_cache);
 886        sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
 887
 888        if (sgt_cache->count == 0)
 889                sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
 890                                  GFP_ATOMIC);
 891        else
 892                sgt_buf = sgt_cache->buf[--sgt_cache->count];
 893        if (unlikely(!sgt_buf))
 894                return -ENOMEM;
 895
 896        sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
 897        sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 898
 899        addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
 900        if (unlikely(dma_mapping_error(dev, addr))) {
 901                err = -ENOMEM;
 902                goto data_map_failed;
 903        }
 904
 905        /* Fill in the HW SGT structure */
 906        dpaa2_sg_set_addr(sgt, addr);
 907        dpaa2_sg_set_len(sgt, skb->len);
 908        dpaa2_sg_set_final(sgt, true);
 909
 910        /* Store the skb backpointer in the SGT buffer */
 911        *swa_addr = (void *)sgt_buf;
 912        swa = (struct dpaa2_eth_swa *)sgt_buf;
 913        swa->type = DPAA2_ETH_SWA_SINGLE;
 914        swa->single.skb = skb;
 915        swa->single.sgt_size = sgt_buf_size;
 916
 917        /* Separately map the SGT buffer */
 918        sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
 919        if (unlikely(dma_mapping_error(dev, sgt_addr))) {
 920                err = -ENOMEM;
 921                goto sgt_map_failed;
 922        }
 923
 924        dpaa2_fd_set_offset(fd, priv->tx_data_offset);
 925        dpaa2_fd_set_format(fd, dpaa2_fd_sg);
 926        dpaa2_fd_set_addr(fd, sgt_addr);
 927        dpaa2_fd_set_len(fd, skb->len);
 928        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
 929
 930        return 0;
 931
 932sgt_map_failed:
 933        dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
 934data_map_failed:
 935        if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
 936                kfree(sgt_buf);
 937        else
 938                sgt_cache->buf[sgt_cache->count++] = sgt_buf;
 939
 940        return err;
 941}
 942
 943/* Create a frame descriptor based on a linear skb */
 944static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 945                                     struct sk_buff *skb,
 946                                     struct dpaa2_fd *fd,
 947                                     void **swa_addr)
 948{
 949        struct device *dev = priv->net_dev->dev.parent;
 950        u8 *buffer_start, *aligned_start;
 951        struct dpaa2_eth_swa *swa;
 952        dma_addr_t addr;
 953
 954        buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
 955
 956        /* If there's enough room to align the FD address, do it.
 957         * It will help hardware optimize accesses.
 958         */
 959        aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
 960                                  DPAA2_ETH_TX_BUF_ALIGN);
 961        if (aligned_start >= skb->head)
 962                buffer_start = aligned_start;
 963
 964        /* Store a backpointer to the skb at the beginning of the buffer
 965         * (in the private data area) such that we can release it
 966         * on Tx confirm
 967         */
 968        *swa_addr = (void *)buffer_start;
 969        swa = (struct dpaa2_eth_swa *)buffer_start;
 970        swa->type = DPAA2_ETH_SWA_SINGLE;
 971        swa->single.skb = skb;
 972
 973        addr = dma_map_single(dev, buffer_start,
 974                              skb_tail_pointer(skb) - buffer_start,
 975                              DMA_BIDIRECTIONAL);
 976        if (unlikely(dma_mapping_error(dev, addr)))
 977                return -ENOMEM;
 978
 979        dpaa2_fd_set_addr(fd, addr);
 980        dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
 981        dpaa2_fd_set_len(fd, skb->len);
 982        dpaa2_fd_set_format(fd, dpaa2_fd_single);
 983        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
 984
 985        return 0;
 986}
 987
 988/* FD freeing routine on the Tx path
 989 *
 990 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 991 * back-pointed to is also freed.
 992 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 993 * dpaa2_eth_tx().
 994 */
 995static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
 996                                 struct dpaa2_eth_fq *fq,
 997                                 const struct dpaa2_fd *fd, bool in_napi)
 998{
 999        struct device *dev = priv->net_dev->dev.parent;
1000        dma_addr_t fd_addr, sg_addr;
1001        struct sk_buff *skb = NULL;
1002        unsigned char *buffer_start;
1003        struct dpaa2_eth_swa *swa;
1004        u8 fd_format = dpaa2_fd_get_format(fd);
1005        u32 fd_len = dpaa2_fd_get_len(fd);
1006
1007        struct dpaa2_eth_sgt_cache *sgt_cache;
1008        struct dpaa2_sg_entry *sgt;
1009
1010        fd_addr = dpaa2_fd_get_addr(fd);
1011        buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1012        swa = (struct dpaa2_eth_swa *)buffer_start;
1013
1014        if (fd_format == dpaa2_fd_single) {
1015                if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1016                        skb = swa->single.skb;
1017                        /* Accessing the skb buffer is safe before dma unmap,
1018                         * because we didn't map the actual skb shell.
1019                         */
1020                        dma_unmap_single(dev, fd_addr,
1021                                         skb_tail_pointer(skb) - buffer_start,
1022                                         DMA_BIDIRECTIONAL);
1023                } else {
1024                        WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1025                        dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1026                                         DMA_BIDIRECTIONAL);
1027                }
1028        } else if (fd_format == dpaa2_fd_sg) {
1029                if (swa->type == DPAA2_ETH_SWA_SG) {
1030                        skb = swa->sg.skb;
1031
1032                        /* Unmap the scatterlist */
1033                        dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1034                                     DMA_BIDIRECTIONAL);
1035                        kfree(swa->sg.scl);
1036
1037                        /* Unmap the SGT buffer */
1038                        dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1039                                         DMA_BIDIRECTIONAL);
1040                } else {
1041                        skb = swa->single.skb;
1042
1043                        /* Unmap the SGT Buffer */
1044                        dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1045                                         DMA_BIDIRECTIONAL);
1046
1047                        sgt = (struct dpaa2_sg_entry *)(buffer_start +
1048                                                        priv->tx_data_offset);
1049                        sg_addr = dpaa2_sg_get_addr(sgt);
1050                        dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1051                }
1052        } else {
1053                netdev_dbg(priv->net_dev, "Invalid FD format\n");
1054                return;
1055        }
1056
1057        if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1058                fq->dq_frames++;
1059                fq->dq_bytes += fd_len;
1060        }
1061
1062        if (swa->type == DPAA2_ETH_SWA_XDP) {
1063                xdp_return_frame(swa->xdp.xdpf);
1064                return;
1065        }
1066
1067        /* Get the timestamp value */
1068        if (skb->cb[0] == TX_TSTAMP) {
1069                struct skb_shared_hwtstamps shhwtstamps;
1070                __le64 *ts = dpaa2_get_ts(buffer_start, true);
1071                u64 ns;
1072
1073                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1074
1075                ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1076                shhwtstamps.hwtstamp = ns_to_ktime(ns);
1077                skb_tstamp_tx(skb, &shhwtstamps);
1078        } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1079                mutex_unlock(&priv->onestep_tstamp_lock);
1080        }
1081
1082        /* Free SGT buffer allocated on tx */
1083        if (fd_format != dpaa2_fd_single) {
1084                sgt_cache = this_cpu_ptr(priv->sgt_cache);
1085                if (swa->type == DPAA2_ETH_SWA_SG) {
1086                        skb_free_frag(buffer_start);
1087                } else {
1088                        if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
1089                                kfree(buffer_start);
1090                        else
1091                                sgt_cache->buf[sgt_cache->count++] = buffer_start;
1092                }
1093        }
1094
1095        /* Move on with skb release */
1096        napi_consume_skb(skb, in_napi);
1097}
1098
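     /* Common Tx path: build a frame descriptor from the skb, optionally
      * arm hardware timestamping, select the Tx queue and priority, then
      * enqueue with a bounded number of retries if the portal is busy.
      */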
1099static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1100                                  struct net_device *net_dev)
1101{
1102        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1103        struct dpaa2_fd fd;
1104        struct rtnl_link_stats64 *percpu_stats;
1105        struct dpaa2_eth_drv_stats *percpu_extras;
1106        struct dpaa2_eth_fq *fq;
1107        struct netdev_queue *nq;
1108        u16 queue_mapping;
1109        unsigned int needed_headroom;
1110        u32 fd_len;
1111        u8 prio = 0;
1112        int err, i;
1113        void *swa;
1114
1115        percpu_stats = this_cpu_ptr(priv->percpu_stats);
1116        percpu_extras = this_cpu_ptr(priv->percpu_extras);
1117
1118        needed_headroom = dpaa2_eth_needed_headroom(skb);
1119
1120        /* We'll be holding a back-reference to the skb until Tx Confirmation;
1121         * we don't want that overwritten by a concurrent Tx with a cloned skb.
1122         */
1123        skb = skb_unshare(skb, GFP_ATOMIC);
1124        if (unlikely(!skb)) {
1125                /* skb_unshare() has already freed the skb */
1126                percpu_stats->tx_dropped++;
1127                return NETDEV_TX_OK;
1128        }
1129
1130        /* Setup the FD fields */
1131        memset(&fd, 0, sizeof(fd));
1132
1133        if (skb_is_nonlinear(skb)) {
1134                err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
1135                percpu_extras->tx_sg_frames++;
1136                percpu_extras->tx_sg_bytes += skb->len;
1137        } else if (skb_headroom(skb) < needed_headroom) {
1138                err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
1139                percpu_extras->tx_sg_frames++;
1140                percpu_extras->tx_sg_bytes += skb->len;
1141                percpu_extras->tx_converted_sg_frames++;
1142                percpu_extras->tx_converted_sg_bytes += skb->len;
1143        } else {
1144                err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
1145        }
1146
1147        if (unlikely(err)) {
1148                percpu_stats->tx_dropped++;
1149                goto err_build_fd;
1150        }
1151
1152        if (skb->cb[0])
1153                dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
1154
1155        /* Tracing point */
1156        trace_dpaa2_tx_fd(net_dev, &fd);
1157
1158        /* TxConf FQ selection relies on queue id from the stack.
1159         * In case of a forwarded frame from another DPNI interface, we choose
1160         * a queue affined to the same core that processed the Rx frame
1161         */
1162        queue_mapping = skb_get_queue_mapping(skb);
1163
1164        if (net_dev->num_tc) {
1165                prio = netdev_txq_to_tc(net_dev, queue_mapping);
1166                /* Hardware interprets priority level 0 as being the highest,
1167                 * so we need to do a reverse mapping to the netdev tc index
1168                 */
1169                prio = net_dev->num_tc - prio - 1;
1170                /* We have only one FQ array entry for all Tx hardware queues
1171                 * with the same flow id (but different priority levels)
1172                 */
1173                queue_mapping %= dpaa2_eth_queue_count(priv);
1174        }
1175        fq = &priv->fq[queue_mapping];
1176
1177        fd_len = dpaa2_fd_get_len(&fd);
1178        nq = netdev_get_tx_queue(net_dev, queue_mapping);
1179        netdev_tx_sent_queue(nq, fd_len);
1180
1181        /* Everything that happens after this enqueues might race with
1182         * the Tx confirmation callback for this frame
1183         */
1184        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1185                err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
1186                if (err != -EBUSY)
1187                        break;
1188        }
1189        percpu_extras->tx_portal_busy += i;
1190        if (unlikely(err < 0)) {
1191                percpu_stats->tx_errors++;
1192                /* Clean up everything, including freeing the skb */
1193                dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
1194                netdev_tx_completed_queue(nq, 1, fd_len);
1195        } else {
1196                percpu_stats->tx_packets++;
1197                percpu_stats->tx_bytes += fd_len;
1198        }
1199
1200        return NETDEV_TX_OK;
1201
1202err_build_fd:
1203        dev_kfree_skb(skb);
1204
1205        return NETDEV_TX_OK;
1206}
1207
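     /* Worker that transmits queued one-step PTP Sync skbs one at a time,
      * serialized by onestep_tstamp_lock (taken here, released on Tx
      * confirmation or on transmit failure).
      */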
1208static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1209{
1210        struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1211                                                   tx_onestep_tstamp);
1212        struct sk_buff *skb;
1213
1214        while (true) {
1215                skb = skb_dequeue(&priv->tx_skbs);
1216                if (!skb)
1217                        return;
1218
 1219                /* Take the lock just before transmitting a one-step
 1220                 * timestamping packet, and release it in
 1221                 * dpaa2_eth_free_tx_fd once the packet is confirmed as
 1222                 * sent by hardware, or when cleaning up after a failure.
 1223                 */
1224                mutex_lock(&priv->onestep_tstamp_lock);
1225                __dpaa2_eth_tx(skb, priv->net_dev);
1226        }
1227}
1228
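     /* ndo_start_xmit: record the per-skb timestamping request in skb->cb[0],
      * defer one-step PTP Sync frames to the timestamping workqueue and
      * transmit everything else directly.
      */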
1229static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1230{
1231        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1232        u8 msgtype, twostep, udp;
1233        u16 offset1, offset2;
1234
1235        /* Utilize skb->cb[0] for timestamping request per skb */
1236        skb->cb[0] = 0;
1237
1238        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1239                if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1240                        skb->cb[0] = TX_TSTAMP;
1241                else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1242                        skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1243        }
1244
1245        /* TX for one-step timestamping PTP Sync packet */
1246        if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1247                if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1248                                         &offset1, &offset2))
1249                        if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1250                                skb_queue_tail(&priv->tx_skbs, skb);
1251                                queue_work(priv->dpaa2_ptp_wq,
1252                                           &priv->tx_onestep_tstamp);
1253                                return NETDEV_TX_OK;
1254                        }
 1255                /* Fall back to two-step timestamping if this is not a
 1256                 * one-step PTP Sync packet
 1257                 */
1258                skb->cb[0] = TX_TSTAMP;
1259        }
1260
1261        /* TX for other packets */
1262        return __dpaa2_eth_tx(skb, net_dev);
1263}
1264
1265/* Tx confirmation frame processing routine */
1266static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1267                              struct dpaa2_eth_channel *ch __always_unused,
1268                              const struct dpaa2_fd *fd,
1269                              struct dpaa2_eth_fq *fq)
1270{
1271        struct rtnl_link_stats64 *percpu_stats;
1272        struct dpaa2_eth_drv_stats *percpu_extras;
1273        u32 fd_len = dpaa2_fd_get_len(fd);
1274        u32 fd_errors;
1275
1276        /* Tracing point */
1277        trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1278
1279        percpu_extras = this_cpu_ptr(priv->percpu_extras);
1280        percpu_extras->tx_conf_frames++;
1281        percpu_extras->tx_conf_bytes += fd_len;
1282
1283        /* Check frame errors in the FD field */
1284        fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1285        dpaa2_eth_free_tx_fd(priv, fq, fd, true);
1286
1287        if (likely(!fd_errors))
1288                return;
1289
1290        if (net_ratelimit())
1291                netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1292                           fd_errors);
1293
1294        percpu_stats = this_cpu_ptr(priv->percpu_stats);
1295        /* Tx-conf logically pertains to the egress path. */
1296        percpu_stats->tx_errors++;
1297}
1298
1299static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1300                                           bool enable)
1301{
1302        int err;
1303
1304        err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1305
1306        if (err) {
1307                netdev_err(priv->net_dev,
1308                           "dpni_enable_vlan_filter failed\n");
1309                return err;
1310        }
1311
1312        return 0;
1313}
1314
1315static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1316{
1317        int err;
1318
1319        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1320                               DPNI_OFF_RX_L3_CSUM, enable);
1321        if (err) {
1322                netdev_err(priv->net_dev,
1323                           "dpni_set_offload(RX_L3_CSUM) failed\n");
1324                return err;
1325        }
1326
1327        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1328                               DPNI_OFF_RX_L4_CSUM, enable);
1329        if (err) {
1330                netdev_err(priv->net_dev,
1331                           "dpni_set_offload(RX_L4_CSUM) failed\n");
1332                return err;
1333        }
1334
1335        return 0;
1336}
1337
1338static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1339{
1340        int err;
1341
1342        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1343                               DPNI_OFF_TX_L3_CSUM, enable);
1344        if (err) {
1345                netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1346                return err;
1347        }
1348
1349        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1350                               DPNI_OFF_TX_L4_CSUM, enable);
1351        if (err) {
1352                netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1353                return err;
1354        }
1355
1356        return 0;
1357}
1358
1359/* Perform a single release command to add buffers
1360 * to the specified buffer pool
1361 */
1362static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1363                              struct dpaa2_eth_channel *ch, u16 bpid)
1364{
1365        struct device *dev = priv->net_dev->dev.parent;
1366        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1367        struct page *page;
1368        dma_addr_t addr;
1369        int retries = 0;
1370        int i, err;
1371
1372        for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
 1376                /* Allocate one page for each Rx buffer. WRIOP sees
 1377                 * the entire page except for a tailroom reserved for
 1378                 * skb shared info
 1379                 */
1380                page = dev_alloc_pages(0);
1381                if (!page)
1382                        goto err_alloc;
1383
1384                addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1385                                    DMA_BIDIRECTIONAL);
1386                if (unlikely(dma_mapping_error(dev, addr)))
1387                        goto err_map;
1388
1389                buf_array[i] = addr;
1390
1391                /* tracing point */
1392                trace_dpaa2_eth_buf_seed(priv->net_dev,
1393                                         page, DPAA2_ETH_RX_BUF_RAW_SIZE,
1394                                         addr, priv->rx_buf_size,
1395                                         bpid);
1396        }
1397
1398release_bufs:
1399        /* In case the portal is busy, retry until successful */
1400        while ((err = dpaa2_io_service_release(ch->dpio, bpid,
1401                                               buf_array, i)) == -EBUSY) {
1402                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1403                        break;
1404                cpu_relax();
1405        }
1406
1407        /* If release command failed, clean up and bail out;
1408         * not much else we can do about it
1409         */
1410        if (err) {
1411                dpaa2_eth_free_bufs(priv, buf_array, i);
1412                return 0;
1413        }
1414
1415        return i;
1416
1417err_map:
1418        __free_pages(page, 0);
1419err_alloc:
1420        /* If we managed to allocate at least some buffers,
1421         * release them to hardware
1422         */
1423        if (i)
1424                goto release_bufs;
1425
1426        return 0;
1427}
1428
1429static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1430{
1431        int i, j;
1432        int new_count;
1433
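            /* Give each channel its full share of buffers, releasing them to
             * the pool in batches of at most DPAA2_ETH_BUFS_PER_CMD
             */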
1434        for (j = 0; j < priv->num_channels; j++) {
1435                for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1436                     i += DPAA2_ETH_BUFS_PER_CMD) {
1437                        new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
1438                        priv->channel[j]->buf_count += new_count;
1439
1440                        /* Pool could only be partially seeded */
1441                        if (new_count < DPAA2_ETH_BUFS_PER_CMD)
1442                                return -ENOMEM;
1443                }
1444        }
1445
1446        return 0;
1447}
1448
1449/*
1450 * Drain the specified number of buffers from the DPNI's private buffer pool.
1451 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1452 */
1453static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
1454{
1455        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1456        int retries = 0;
1457        int ret;
1458
1459        do {
1460                ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1461                                               buf_array, count);
1462                if (ret < 0) {
1463                        if (ret == -EBUSY &&
1464                            retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1465                                continue;
1466                        netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1467                        return;
1468                }
1469                dpaa2_eth_free_bufs(priv, buf_array, ret);
1470                retries = 0;
1471        } while (ret);
1472}
1473
1474static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
1475{
1476        int i;
1477
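            /* Acquire buffers back from hardware, first in command-sized
             * batches and then one at a time, so nothing is left behind
             */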
1478        dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1479        dpaa2_eth_drain_bufs(priv, 1);
1480
1481        for (i = 0; i < priv->num_channels; i++)
1482                priv->channel[i]->buf_count = 0;
1483}
1484
1485/* Function is called from softirq context only, so we don't need to guard
1486 * the access to the per-channel buffer count
1487 */
1488static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1489                                 struct dpaa2_eth_channel *ch,
1490                                 u16 bpid)
1491{
1492        int new_count;
1493
1494        if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1495                return 0;
1496
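            /* Add buffers in batches until the pool is back to its nominal
             * fill level
             */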
1497        do {
1498                new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
1499                if (unlikely(!new_count)) {
1500                        /* Out of memory; abort for now, we'll try later on */
1501                        break;
1502                }
1503                ch->buf_count += new_count;
1504        } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1505
1506        if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1507                return -ENOMEM;
1508
1509        return 0;
1510}
1511
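    /* Free the scatter-gather table buffers cached per cpu for the Tx path */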
1512static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1513{
1514        struct dpaa2_eth_sgt_cache *sgt_cache;
1515        u16 count;
1516        int k, i;
1517
1518        for_each_possible_cpu(k) {
1519                sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1520                count = sgt_cache->count;
1521
1522                for (i = 0; i < count; i++)
1523                        kfree(sgt_cache->buf[i]);
1524                sgt_cache->count = 0;
1525        }
1526}
1527
1528static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1529{
1530        int err;
1531        int dequeues = -1;
1532
1533        /* Retry while portal is busy */
1534        do {
1535                err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1536                                                    ch->store);
1537                dequeues++;
1538                cpu_relax();
1539        } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1540
1541        ch->stats.dequeue_portal_busy += dequeues;
1542        if (unlikely(err))
1543                ch->stats.pull_err++;
1544
1545        return err;
1546}
1547
1548/* NAPI poll routine
1549 *
1550 * Frames are dequeued from the QMan channel associated with this NAPI context.
1551 * Rx, Tx confirmation and (if configured) Rx error frames all count
1552 * towards the NAPI budget.
1553 */
1554static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1555{
1556        struct dpaa2_eth_channel *ch;
1557        struct dpaa2_eth_priv *priv;
1558        int rx_cleaned = 0, txconf_cleaned = 0;
1559        struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1560        struct netdev_queue *nq;
1561        int store_cleaned, work_done;
1562        struct list_head rx_list;
1563        int retries = 0;
1564        u16 flowid;
1565        int err;
1566
1567        ch = container_of(napi, struct dpaa2_eth_channel, napi);
1568        ch->xdp.res = 0;
1569        priv = ch->priv;
1570
1571        INIT_LIST_HEAD(&rx_list);
1572        ch->rx_list = &rx_list;
1573
1574        do {
1575                err = dpaa2_eth_pull_channel(ch);
1576                if (unlikely(err))
1577                        break;
1578
1579                /* Refill pool if appropriate */
1580                dpaa2_eth_refill_pool(priv, ch, priv->bpid);
1581
1582                store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1583                if (store_cleaned <= 0)
1584                        break;
1585                if (fq->type == DPAA2_RX_FQ) {
1586                        rx_cleaned += store_cleaned;
1587                        flowid = fq->flowid;
1588                } else {
1589                        txconf_cleaned += store_cleaned;
1590                        /* We have a single Tx conf FQ on this channel */
1591                        txc_fq = fq;
1592                }
1593
1594                /* If we either consumed the whole NAPI budget with Rx frames
1595                 * or we reached the Tx confirmations threshold, we're done.
1596                 */
1597                if (rx_cleaned >= budget ||
1598                    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1599                        work_done = budget;
1600                        goto out;
1601                }
1602        } while (store_cleaned);
1603
1604        /* We didn't consume the entire budget, so finish napi and
1605         * re-enable data availability notifications
1606         */
1607        napi_complete_done(napi, rx_cleaned);
1608        do {
1609                err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1610                cpu_relax();
1611        } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1612        WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1613                  ch->nctx.desired_cpu);
1614
1615        work_done = max(rx_cleaned, 1);
1616
1617out:
1618        netif_receive_skb_list(ch->rx_list);
1619
1620        if (txc_fq && txc_fq->dq_frames) {
1621                nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1622                netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1623                                          txc_fq->dq_bytes);
1624                txc_fq->dq_frames = 0;
1625                txc_fq->dq_bytes = 0;
1626        }
1627
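            /* Flush deferred XDP work: redirected frames through the kernel
             * flush helper, and XDP_TX frames batched on the last Rx FQ we
             * serviced
             */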
1628        if (ch->xdp.res & XDP_REDIRECT)
1629                xdp_do_flush_map();
1630        else if (rx_cleaned && ch->xdp.res & XDP_TX)
1631                dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
1632
1633        return work_done;
1634}
1635
1636static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
1637{
1638        struct dpaa2_eth_channel *ch;
1639        int i;
1640
1641        for (i = 0; i < priv->num_channels; i++) {
1642                ch = priv->channel[i];
1643                napi_enable(&ch->napi);
1644        }
1645}
1646
1647static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
1648{
1649        struct dpaa2_eth_channel *ch;
1650        int i;
1651
1652        for (i = 0; i < priv->num_channels; i++) {
1653                ch = priv->channel[i];
1654                napi_disable(&ch->napi);
1655        }
1656}
1657
1658void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
1659                               bool tx_pause, bool pfc)
1660{
1661        struct dpni_taildrop td = {0};
1662        struct dpaa2_eth_fq *fq;
1663        int i, err;
1664
1665        /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
1666         * flow control is disabled (as it might interfere with either the
1667         * buffer pool depletion trigger for pause frames or with the group
1668         * congestion trigger for PFC frames)
1669         */
1670        td.enable = !tx_pause;
1671        if (priv->rx_fqtd_enabled == td.enable)
1672                goto set_cgtd;
1673
1674        td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
1675        td.units = DPNI_CONGESTION_UNIT_BYTES;
1676
1677        for (i = 0; i < priv->num_fqs; i++) {
1678                fq = &priv->fq[i];
1679                if (fq->type != DPAA2_RX_FQ)
1680                        continue;
1681                err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1682                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
1683                                        fq->tc, fq->flowid, &td);
1684                if (err) {
1685                        netdev_err(priv->net_dev,
1686                                   "dpni_set_taildrop(FQ) failed\n");
1687                        return;
1688                }
1689        }
1690
1691        priv->rx_fqtd_enabled = td.enable;
1692
1693set_cgtd:
1694        /* Congestion group taildrop: threshold is in frames, per group
1695         * of FQs belonging to the same traffic class
1696         * Enabled if general Tx pause disabled or if PFCs are enabled
1697         * (congestion group threshold for PFC generation is lower than the
1698         * CG taildrop threshold, so it won't interfere with it; we also
1699         * want frames in non-PFC enabled traffic classes to be kept in check)
1700         */
1701        td.enable = !tx_pause || pfc;
1702        if (priv->rx_cgtd_enabled == td.enable)
1703                return;
1704
1705        td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
1706        td.units = DPNI_CONGESTION_UNIT_FRAMES;
1707        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
1708                err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1709                                        DPNI_CP_GROUP, DPNI_QUEUE_RX,
1710                                        i, 0, &td);
1711                if (err) {
1712                        netdev_err(priv->net_dev,
1713                                   "dpni_set_taildrop(CG) failed\n");
1714                        return;
1715                }
1716        }
1717
1718        priv->rx_cgtd_enabled = td.enable;
1719}
1720
1721static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
1722{
1723        struct dpni_link_state state = {0};
1724        bool tx_pause;
1725        int err;
1726
1727        err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1728        if (unlikely(err)) {
1729                netdev_err(priv->net_dev,
1730                           "dpni_get_link_state() failed\n");
1731                return err;
1732        }
1733
1734        /* If Tx pause frame settings have changed, we need to update
1735         * Rx FQ taildrop configuration as well. We configure taildrop
1736         * only when pause frame generation is disabled.
1737         */
1738        tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
1739        dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
1740
1741        /* When we manage the MAC/PHY using phylink there is no need
1742         * to manually update the netif_carrier.
1743         */
1744        if (dpaa2_eth_is_type_phy(priv))
1745                goto out;
1746
1747        /* Check link state; speed / duplex changes are not handled yet */
1748        if (priv->link_state.up == state.up)
1749                goto out;
1750
1751        if (state.up) {
1752                netif_carrier_on(priv->net_dev);
1753                netif_tx_start_all_queues(priv->net_dev);
1754        } else {
1755                netif_tx_stop_all_queues(priv->net_dev);
1756                netif_carrier_off(priv->net_dev);
1757        }
1758
1759        netdev_info(priv->net_dev, "Link Event: state %s\n",
1760                    state.up ? "up" : "down");
1761
1762out:
1763        priv->link_state = state;
1764
1765        return 0;
1766}
1767
1768static int dpaa2_eth_open(struct net_device *net_dev)
1769{
1770        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1771        int err;
1772
1773        err = dpaa2_eth_seed_pool(priv, priv->bpid);
1774        if (err) {
1775                /* Not much to do; the buffer pool, though not filled up,
1776                 * may still contain some buffers which would enable us
1777                 * to limp on.
1778                 */
1779                netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1780                           priv->dpbp_dev->obj_desc.id, priv->bpid);
1781        }
1782
1783        if (!dpaa2_eth_is_type_phy(priv)) {
1784                /* We'll only start the txqs when the link is actually ready;
1785                 * make sure we don't race against the link up notification,
1786                 * which may come immediately after dpni_enable().
1787                 */
1788                netif_tx_stop_all_queues(net_dev);
1789
1790                /* Also, explicitly set carrier off, otherwise
1791                 * netif_carrier_ok() will return true and cause 'ip link show'
1792                 * to report the LOWER_UP flag, even though the link
1793                 * notification wasn't even received.
1794                 */
1795                netif_carrier_off(net_dev);
1796        }
1797        dpaa2_eth_enable_ch_napi(priv);
1798
1799        err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1800        if (err < 0) {
1801                netdev_err(net_dev, "dpni_enable() failed\n");
1802                goto enable_err;
1803        }
1804
1805        if (dpaa2_eth_is_type_phy(priv))
1806                phylink_start(priv->mac->phylink);
1807
1808        return 0;
1809
1810enable_err:
1811        dpaa2_eth_disable_ch_napi(priv);
1812        dpaa2_eth_drain_pool(priv);
1813        return err;
1814}
1815
1816/* Total number of in-flight frames on ingress queues */
1817static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
1818{
1819        struct dpaa2_eth_fq *fq;
1820        u32 fcnt = 0, bcnt = 0, total = 0;
1821        int i, err;
1822
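            /* All FQs in the array (Rx, Tx conf, Rx err) deliver frames
             * towards the CPUs, so count pending frames on each of them
             */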
1823        for (i = 0; i < priv->num_fqs; i++) {
1824                fq = &priv->fq[i];
1825                err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1826                if (err) {
1827                        netdev_warn(priv->net_dev, "query_fq_count failed\n");
1828                        break;
1829                }
1830                total += fcnt;
1831        }
1832
1833        return total;
1834}
1835
1836static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
1837{
1838        int retries = 10;
1839        u32 pending;
1840
1841        do {
1842                pending = dpaa2_eth_ingress_fq_count(priv);
1843                if (pending)
1844                        msleep(100);
1845        } while (pending && --retries);
1846}
1847
1848#define DPNI_TX_PENDING_VER_MAJOR       7
1849#define DPNI_TX_PENDING_VER_MINOR       13
1850static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
1851{
1852        union dpni_statistics stats;
1853        int retries = 10;
1854        int err;
1855
1856        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1857                                   DPNI_TX_PENDING_VER_MINOR) < 0)
1858                goto out;
1859
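            /* Statistics page 6 reports the number of Tx frames still in
             * flight; poll it until it drops to zero or we run out of retries
             */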
1860        do {
1861                err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1862                                          &stats);
1863                if (err)
1864                        goto out;
1865                if (stats.page_6.tx_pending_frames == 0)
1866                        return;
1867        } while (--retries);
1868
1869out:
1870        msleep(500);
1871}
1872
1873static int dpaa2_eth_stop(struct net_device *net_dev)
1874{
1875        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1876        int dpni_enabled = 0;
1877        int retries = 10;
1878
1879        if (dpaa2_eth_is_type_phy(priv)) {
1880                phylink_stop(priv->mac->phylink);
1881        } else {
1882                netif_tx_stop_all_queues(net_dev);
1883                netif_carrier_off(net_dev);
1884        }
1885
1886        /* On dpni_disable(), the MC firmware will:
1887         * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1888         * - cut off WRIOP dequeues from egress FQs and wait until transmission
1889         * of all in flight Tx frames is finished (and corresponding Tx conf
1890         * frames are enqueued back to software)
1891         *
1892         * Before calling dpni_disable(), we wait for all Tx frames to arrive
1893         * on WRIOP. After dpni_disable() returns, wait until all remaining
1894         * frames on Rx and Tx conf queues are consumed on NAPI poll.
1895         */
1896        dpaa2_eth_wait_for_egress_fq_empty(priv);
1897
1898        do {
1899                dpni_disable(priv->mc_io, 0, priv->mc_token);
1900                dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1901                if (dpni_enabled)
1902                        /* Allow the hardware some slack */
1903                        msleep(100);
1904        } while (dpni_enabled && --retries);
1905        if (!retries) {
1906                netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1907                /* Must go on and disable NAPI nonetheless, so we don't crash at
1908                 * the next "ifconfig up"
1909                 */
1910        }
1911
1912        dpaa2_eth_wait_for_ingress_fq_empty(priv);
1913        dpaa2_eth_disable_ch_napi(priv);
1914
1915        /* Empty the buffer pool */
1916        dpaa2_eth_drain_pool(priv);
1917
1918        /* Empty the Scatter-Gather Buffer cache */
1919        dpaa2_eth_sgt_cache_drain(priv);
1920
1921        return 0;
1922}
1923
1924static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1925{
1926        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1927        struct device *dev = net_dev->dev.parent;
1928        int err;
1929
1930        err = eth_mac_addr(net_dev, addr);
1931        if (err < 0) {
1932                dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1933                return err;
1934        }
1935
1936        err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1937                                        net_dev->dev_addr);
1938        if (err) {
1939                dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1940                return err;
1941        }
1942
1943        return 0;
1944}
1945
1946/* Fill in counters maintained by the GPP driver. These may be different from
1947 * the hardware counters obtained by ethtool.
1948 */
1949static void dpaa2_eth_get_stats(struct net_device *net_dev,
1950                                struct rtnl_link_stats64 *stats)
1951{
1952        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1953        struct rtnl_link_stats64 *percpu_stats;
1954        u64 *cpustats;
1955        u64 *netstats = (u64 *)stats;
1956        int i, j;
1957        int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1958
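            /* Accumulate the per-cpu counters by walking both structures as
             * flat arrays of u64 fields
             */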
1959        for_each_possible_cpu(i) {
1960                percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1961                cpustats = (u64 *)percpu_stats;
1962                for (j = 0; j < num; j++)
1963                        netstats[j] += cpustats[j];
1964        }
1965}
1966
1967/* Copy mac unicast addresses from @net_dev to @priv.
1968 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1969 */
1970static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
1971                                     struct dpaa2_eth_priv *priv)
1972{
1973        struct netdev_hw_addr *ha;
1974        int err;
1975
1976        netdev_for_each_uc_addr(ha, net_dev) {
1977                err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1978                                        ha->addr);
1979                if (err)
1980                        netdev_warn(priv->net_dev,
1981                                    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1982                                    ha->addr, err);
1983        }
1984}
1985
1986/* Copy mac multicast addresses from @net_dev to @priv
1987 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1988 */
1989static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
1990                                     struct dpaa2_eth_priv *priv)
1991{
1992        struct netdev_hw_addr *ha;
1993        int err;
1994
1995        netdev_for_each_mc_addr(ha, net_dev) {
1996                err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1997                                        ha->addr);
1998                if (err)
1999                        netdev_warn(priv->net_dev,
2000                                    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2001                                    ha->addr, err);
2002        }
2003}
2004
2005static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2006                                __be16 vlan_proto, u16 vid)
2007{
2008        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2009        int err;
2010
2011        err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2012                               vid, 0, 0, 0);
2013
2014        if (err) {
2015                netdev_warn(priv->net_dev,
2016                            "Could not add the vlan id %u\n",
2017                            vid);
2018                return err;
2019        }
2020
2021        return 0;
2022}
2023
2024static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2025                                 __be16 vlan_proto, u16 vid)
2026{
2027        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2028        int err;
2029
2030        err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2031
2032        if (err) {
2033                netdev_warn(priv->net_dev,
2034                            "Could not remove the vlan id %u\n",
2035                            vid);
2036                return err;
2037        }
2038
2039        return 0;
2040}
2041
2042static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2043{
2044        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2045        int uc_count = netdev_uc_count(net_dev);
2046        int mc_count = netdev_mc_count(net_dev);
2047        u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2048        u32 options = priv->dpni_attrs.options;
2049        u16 mc_token = priv->mc_token;
2050        struct fsl_mc_io *mc_io = priv->mc_io;
2051        int err;
2052
2053        /* Basic sanity checks; these probably indicate a misconfiguration */
2054        if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2055                netdev_info(net_dev,
2056                            "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2057                            max_mac);
2058
2059        /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2060        if (uc_count > max_mac) {
2061                netdev_info(net_dev,
2062                            "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2063                            uc_count, max_mac);
2064                goto force_promisc;
2065        }
2066        if (mc_count + uc_count > max_mac) {
2067                netdev_info(net_dev,
2068                            "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2069                            uc_count + mc_count, max_mac);
2070                goto force_mc_promisc;
2071        }
2072
2073        /* Adjust promisc settings due to flag combinations */
2074        if (net_dev->flags & IFF_PROMISC)
2075                goto force_promisc;
2076        if (net_dev->flags & IFF_ALLMULTI) {
2077                /* First, rebuild unicast filtering table. This should be done
2078                 * in promisc mode, in order to avoid frame loss while we
2079                 * progressively add entries to the table.
2080                 * We don't know whether we had been in promisc already, and
2081                 * making an MC call to find out is expensive; so set uc promisc
2082                 * nonetheless.
2083                 */
2084                err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2085                if (err)
2086                        netdev_warn(net_dev, "Can't set uc promisc\n");
2087
2088                /* Actual uc table reconstruction. */
2089                err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2090                if (err)
2091                        netdev_warn(net_dev, "Can't clear uc filters\n");
2092                dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2093
2094                /* Finally, clear uc promisc and set mc promisc as requested. */
2095                err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2096                if (err)
2097                        netdev_warn(net_dev, "Can't clear uc promisc\n");
2098                goto force_mc_promisc;
2099        }
2100
2101        /* Neither unicast nor multicast promisc will be on... eventually.
2102         * For now, rebuild mac filtering tables while forcing both of them on.
2103         */
2104        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2105        if (err)
2106                netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2107        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2108        if (err)
2109                netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2110
2111        /* Actual mac filtering tables reconstruction */
2112        err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2113        if (err)
2114                netdev_warn(net_dev, "Can't clear mac filters\n");
2115        dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2116        dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2117
2118        /* Now we can clear both ucast and mcast promisc, without risking
2119         * to drop legitimate frames anymore.
2120         */
2121        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2122        if (err)
2123                netdev_warn(net_dev, "Can't clear ucast promisc\n");
2124        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2125        if (err)
2126                netdev_warn(net_dev, "Can't clear mcast promisc\n");
2127
2128        return;
2129
2130force_promisc:
2131        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2132        if (err)
2133                netdev_warn(net_dev, "Can't set ucast promisc\n");
2134force_mc_promisc:
2135        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2136        if (err)
2137                netdev_warn(net_dev, "Can't set mcast promisc\n");
2138}
2139
2140static int dpaa2_eth_set_features(struct net_device *net_dev,
2141                                  netdev_features_t features)
2142{
2143        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2144        netdev_features_t changed = features ^ net_dev->features;
2145        bool enable;
2146        int err;
2147
2148        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2149                enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2150                err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2151                if (err)
2152                        return err;
2153        }
2154
2155        if (changed & NETIF_F_RXCSUM) {
2156                enable = !!(features & NETIF_F_RXCSUM);
2157                err = dpaa2_eth_set_rx_csum(priv, enable);
2158                if (err)
2159                        return err;
2160        }
2161
2162        if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2163                enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2164                err = dpaa2_eth_set_tx_csum(priv, enable);
2165                if (err)
2166                        return err;
2167        }
2168
2169        return 0;
2170}
2171
2172static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2173{
2174        struct dpaa2_eth_priv *priv = netdev_priv(dev);
2175        struct hwtstamp_config config;
2176
2177        if (!dpaa2_ptp)
2178                return -EINVAL;
2179
2180        if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2181                return -EFAULT;
2182
2183        switch (config.tx_type) {
2184        case HWTSTAMP_TX_OFF:
2185        case HWTSTAMP_TX_ON:
2186        case HWTSTAMP_TX_ONESTEP_SYNC:
2187                priv->tx_tstamp_type = config.tx_type;
2188                break;
2189        default:
2190                return -ERANGE;
2191        }
2192
2193        if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2194                priv->rx_tstamp = false;
2195        } else {
2196                priv->rx_tstamp = true;
2197                /* TS is set for all frame types, not only those requested */
2198                config.rx_filter = HWTSTAMP_FILTER_ALL;
2199        }
2200
2201        return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2202                        -EFAULT : 0;
2203}
2204
2205static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2206{
2207        struct dpaa2_eth_priv *priv = netdev_priv(dev);
2208
2209        if (cmd == SIOCSHWTSTAMP)
2210                return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2211
2212        if (dpaa2_eth_is_type_phy(priv))
2213                return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2214
2215        return -EOPNOTSUPP;
2216}
2217
2218static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2219{
2220        int mfl, linear_mfl;
2221
2222        mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
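            /* Largest frame that still fits linearly in one Rx buffer: the
             * buffer size minus the hardware annotation area, the headroom
             * reserved by the driver and the extra XDP headroom
             */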
2223        linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2224                     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2225
2226        if (mfl > linear_mfl) {
2227                netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2228                            linear_mfl - VLAN_ETH_HLEN);
2229                return false;
2230        }
2231
2232        return true;
2233}
2234
2235static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2236{
2237        int mfl, err;
2238
2239        /* We enforce a maximum Rx frame length based on MTU only if we have
2240         * an XDP program attached (in order to avoid Rx S/G frames).
2241         * Otherwise, we accept all incoming frames as long as they are not
2242         * larger than the maximum frame size supported in hardware
2243         */
2244        if (has_xdp)
2245                mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2246        else
2247                mfl = DPAA2_ETH_MFL;
2248
2249        err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2250        if (err) {
2251                netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2252                return err;
2253        }
2254
2255        return 0;
2256}
2257
2258static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2259{
2260        struct dpaa2_eth_priv *priv = netdev_priv(dev);
2261        int err;
2262
2263        if (!priv->xdp_prog)
2264                goto out;
2265
2266        if (!xdp_mtu_valid(priv, new_mtu))
2267                return -EINVAL;
2268
2269        err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2270        if (err)
2271                return err;
2272
2273out:
2274        dev->mtu = new_mtu;
2275        return 0;
2276}
2277
2278static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2279{
2280        struct dpni_buffer_layout buf_layout = {0};
2281        int err;
2282
2283        err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2284                                     DPNI_QUEUE_RX, &buf_layout);
2285        if (err) {
2286                netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2287                return err;
2288        }
2289
2290        /* Reserve extra headroom for XDP header size changes */
2291        buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2292                                    (has_xdp ? XDP_PACKET_HEADROOM : 0);
2293        buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2294        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2295                                     DPNI_QUEUE_RX, &buf_layout);
2296        if (err) {
2297                netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2298                return err;
2299        }
2300
2301        return 0;
2302}
2303
2304static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2305{
2306        struct dpaa2_eth_priv *priv = netdev_priv(dev);
2307        struct dpaa2_eth_channel *ch;
2308        struct bpf_prog *old;
2309        bool up, need_update;
2310        int i, err;
2311
2312        if (prog && !xdp_mtu_valid(priv, dev->mtu))
2313                return -EINVAL;
2314
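            /* Take one reference on the program for each channel that will
             * hold a pointer to it
             */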
2315        if (prog)
2316                bpf_prog_add(prog, priv->num_channels);
2317
2318        up = netif_running(dev);
2319        need_update = (!!priv->xdp_prog != !!prog);
2320
2321        if (up)
2322                dpaa2_eth_stop(dev);
2323
2324        /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2325         * Also, when switching between xdp/non-xdp modes we need to reconfigure
2326         * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2327         * so we are sure no old format buffers will be used from now on.
2328         */
2329        if (need_update) {
2330                err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2331                if (err)
2332                        goto out_err;
2333                err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2334                if (err)
2335                        goto out_err;
2336        }
2337
2338        old = xchg(&priv->xdp_prog, prog);
2339        if (old)
2340                bpf_prog_put(old);
2341
2342        for (i = 0; i < priv->num_channels; i++) {
2343                ch = priv->channel[i];
2344                old = xchg(&ch->xdp.prog, prog);
2345                if (old)
2346                        bpf_prog_put(old);
2347        }
2348
2349        if (up) {
2350                err = dpaa2_eth_open(dev);
2351                if (err)
2352                        return err;
2353        }
2354
2355        return 0;
2356
2357out_err:
2358        if (prog)
2359                bpf_prog_sub(prog, priv->num_channels);
2360        if (up)
2361                dpaa2_eth_open(dev);
2362
2363        return err;
2364}
2365
2366static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2367{
2368        switch (xdp->command) {
2369        case XDP_SETUP_PROG:
2370                return dpaa2_eth_setup_xdp(dev, xdp->prog);
2371        default:
2372                return -EINVAL;
2373        }
2374
2375        return 0;
2376}
2377
2378static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2379                                   struct xdp_frame *xdpf,
2380                                   struct dpaa2_fd *fd)
2381{
2382        struct device *dev = net_dev->dev.parent;
2383        unsigned int needed_headroom;
2384        struct dpaa2_eth_swa *swa;
2385        void *buffer_start, *aligned_start;
2386        dma_addr_t addr;
2387
2388        /* We require a minimum headroom to be able to transmit the frame.
2389         * Otherwise return an error and let the original net_device handle it
2390         */
2391        needed_headroom = dpaa2_eth_needed_headroom(NULL);
2392        if (xdpf->headroom < needed_headroom)
2393                return -EINVAL;
2394
2395        /* Setup the FD fields */
2396        memset(fd, 0, sizeof(*fd));
2397
2398        /* Align FD address, if possible */
2399        buffer_start = xdpf->data - needed_headroom;
2400        aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2401                                  DPAA2_ETH_TX_BUF_ALIGN);
2402        if (aligned_start >= xdpf->data - xdpf->headroom)
2403                buffer_start = aligned_start;
2404
2405        swa = (struct dpaa2_eth_swa *)buffer_start;
2406        /* Fill in the software annotation used to free the frame on Tx conf */
2407        swa->type = DPAA2_ETH_SWA_XDP;
2408        swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2409        swa->xdp.xdpf = xdpf;
2410
2411        addr = dma_map_single(dev, buffer_start,
2412                              swa->xdp.dma_size,
2413                              DMA_BIDIRECTIONAL);
2414        if (unlikely(dma_mapping_error(dev, addr)))
2415                return -ENOMEM;
2416
2417        dpaa2_fd_set_addr(fd, addr);
2418        dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2419        dpaa2_fd_set_len(fd, xdpf->len);
2420        dpaa2_fd_set_format(fd, dpaa2_fd_single);
2421        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2422
2423        return 0;
2424}
2425
2426static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2427                              struct xdp_frame **frames, u32 flags)
2428{
2429        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2430        struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2431        struct rtnl_link_stats64 *percpu_stats;
2432        struct dpaa2_eth_fq *fq;
2433        struct dpaa2_fd *fds;
2434        int enqueued, i, err;
2435
2436        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2437                return -EINVAL;
2438
2439        if (!netif_running(net_dev))
2440                return -ENETDOWN;
2441
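            /* Use the Tx frame queue indexed by the current CPU and batch
             * the FDs in its xdp_redirect_fds array before flushing them
             * to hardware
             */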
2442        fq = &priv->fq[smp_processor_id()];
2443        xdp_redirect_fds = &fq->xdp_redirect_fds;
2444        fds = xdp_redirect_fds->fds;
2445
2446        percpu_stats = this_cpu_ptr(priv->percpu_stats);
2447
2448        /* Create an FD for each xdp_frame in the list received */
2449        for (i = 0; i < n; i++) {
2450                err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2451                if (err)
2452                        break;
2453        }
2454        xdp_redirect_fds->num = i;
2455
2456        /* enqueue all the frame descriptors */
2457        enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2458
2459        /* update statistics */
2460        percpu_stats->tx_packets += enqueued;
2461        for (i = 0; i < enqueued; i++)
2462                percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2463
2464        return enqueued;
2465}
2466
2467static int update_xps(struct dpaa2_eth_priv *priv)
2468{
2469        struct net_device *net_dev = priv->net_dev;
2470        struct cpumask xps_mask;
2471        struct dpaa2_eth_fq *fq;
2472        int i, num_queues, netdev_queues;
2473        int err = 0;
2474
2475        num_queues = dpaa2_eth_queue_count(priv);
2476        netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2477
2478        /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2479         * queues, so only process those
2480         */
2481        for (i = 0; i < netdev_queues; i++) {
2482                fq = &priv->fq[i % num_queues];
2483
2484                cpumask_clear(&xps_mask);
2485                cpumask_set_cpu(fq->target_cpu, &xps_mask);
2486
2487                err = netif_set_xps_queue(net_dev, &xps_mask, i);
2488                if (err) {
2489                        netdev_warn_once(net_dev, "Error setting XPS queue\n");
2490                        break;
2491                }
2492        }
2493
2494        return err;
2495}
2496
2497static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2498                                  struct tc_mqprio_qopt *mqprio)
2499{
2500        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2501        u8 num_tc, num_queues;
2502        int i;
2503
2504        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2505        num_queues = dpaa2_eth_queue_count(priv);
2506        num_tc = mqprio->num_tc;
2507
2508        if (num_tc == net_dev->num_tc)
2509                return 0;
2510
2511        if (num_tc  > dpaa2_eth_tc_count(priv)) {
2512                netdev_err(net_dev, "Max %d traffic classes supported\n",
2513                           dpaa2_eth_tc_count(priv));
2514                return -EOPNOTSUPP;
2515        }
2516
2517        if (!num_tc) {
2518                netdev_reset_tc(net_dev);
2519                netif_set_real_num_tx_queues(net_dev, num_queues);
2520                goto out;
2521        }
2522
2523        netdev_set_num_tc(net_dev, num_tc);
2524        netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2525
2526        for (i = 0; i < num_tc; i++)
2527                netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2528
2529out:
2530        update_xps(priv);
2531
2532        return 0;
2533}
2534
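    /* The TBF offload passes rates in bytes/s, while the firmware expects
     * Mbits/s: divide by 10^6 and multiply by 8 bits per byte
     */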
2535#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2536
2537static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2538{
2539        struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2540        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2541        struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2542        struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2543        int err;
2544
2545        if (p->command == TC_TBF_STATS)
2546                return -EOPNOTSUPP;
2547
2548        /* Only per-port Tx shaping is supported */
2549        if (p->parent != TC_H_ROOT)
2550                return -EOPNOTSUPP;
2551
2552        if (p->command == TC_TBF_REPLACE) {
2553                if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2554                        netdev_err(net_dev, "burst size cannot be greater than %d\n",
2555                                   DPAA2_ETH_MAX_BURST_SIZE);
2556                        return -EINVAL;
2557                }
2558
2559                tx_cr_shaper.max_burst_size = cfg->max_size;
2560                /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2561                 * rate in Mbits/s
2562                 */
2563                tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2564        }
2565
2566        err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2567                                  &tx_er_shaper, 0);
2568        if (err) {
2569                netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2570                return err;
2571        }
2572
2573        return 0;
2574}
2575
2576static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2577                              enum tc_setup_type type, void *type_data)
2578{
2579        switch (type) {
2580        case TC_SETUP_QDISC_MQPRIO:
2581                return dpaa2_eth_setup_mqprio(net_dev, type_data);
2582        case TC_SETUP_QDISC_TBF:
2583                return dpaa2_eth_setup_tbf(net_dev, type_data);
2584        default:
2585                return -EOPNOTSUPP;
2586        }
2587}
2588
2589static const struct net_device_ops dpaa2_eth_ops = {
2590        .ndo_open = dpaa2_eth_open,
2591        .ndo_start_xmit = dpaa2_eth_tx,
2592        .ndo_stop = dpaa2_eth_stop,
2593        .ndo_set_mac_address = dpaa2_eth_set_addr,
2594        .ndo_get_stats64 = dpaa2_eth_get_stats,
2595        .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2596        .ndo_set_features = dpaa2_eth_set_features,
2597        .ndo_eth_ioctl = dpaa2_eth_ioctl,
2598        .ndo_change_mtu = dpaa2_eth_change_mtu,
2599        .ndo_bpf = dpaa2_eth_xdp,
2600        .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2601        .ndo_setup_tc = dpaa2_eth_setup_tc,
2602        .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
2603        .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
2604};
2605
2606static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2607{
2608        struct dpaa2_eth_channel *ch;
2609
2610        ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2611
2612        /* Update NAPI statistics */
2613        ch->stats.cdan++;
2614
2615        napi_schedule(&ch->napi);
2616}
2617
2618/* Allocate and configure a DPCON object */
2619static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
2620{
2621        struct fsl_mc_device *dpcon;
2622        struct device *dev = priv->net_dev->dev.parent;
2623        int err;
2624
2625        err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2626                                     FSL_MC_POOL_DPCON, &dpcon);
2627        if (err) {
2628                if (err == -ENXIO)
2629                        err = -EPROBE_DEFER;
2630                else
2631                        dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2632                return ERR_PTR(err);
2633        }
2634
2635        err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2636        if (err) {
2637                dev_err(dev, "dpcon_open() failed\n");
2638                goto free;
2639        }
2640
2641        err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2642        if (err) {
2643                dev_err(dev, "dpcon_reset() failed\n");
2644                goto close;
2645        }
2646
2647        err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2648        if (err) {
2649                dev_err(dev, "dpcon_enable() failed\n");
2650                goto close;
2651        }
2652
2653        return dpcon;
2654
2655close:
2656        dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2657free:
2658        fsl_mc_object_free(dpcon);
2659
2660        return ERR_PTR(err);
2661}
2662
2663static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2664                                 struct fsl_mc_device *dpcon)
2665{
2666        dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2667        dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2668        fsl_mc_object_free(dpcon);
2669}
2670
2671static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
2672{
2673        struct dpaa2_eth_channel *channel;
2674        struct dpcon_attr attr;
2675        struct device *dev = priv->net_dev->dev.parent;
2676        int err;
2677
2678        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2679        if (!channel)
2680                return NULL;
2681
2682        channel->dpcon = dpaa2_eth_setup_dpcon(priv);
2683        if (IS_ERR(channel->dpcon)) {
2684                err = PTR_ERR(channel->dpcon);
2685                goto err_setup;
2686        }
2687
2688        err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2689                                   &attr);
2690        if (err) {
2691                dev_err(dev, "dpcon_get_attributes() failed\n");
2692                goto err_get_attr;
2693        }
2694
2695        channel->dpcon_id = attr.id;
2696        channel->ch_id = attr.qbman_ch_id;
2697        channel->priv = priv;
2698
2699        return channel;
2700
2701err_get_attr:
2702        dpaa2_eth_free_dpcon(priv, channel->dpcon);
2703err_setup:
2704        kfree(channel);
2705        return ERR_PTR(err);
2706}
2707
2708static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2709                                   struct dpaa2_eth_channel *channel)
2710{
2711        dpaa2_eth_free_dpcon(priv, channel->dpcon);
2712        kfree(channel);
2713}
2714
2715/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2716 * and register data availability notifications
2717 */
2718static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
2719{
2720        struct dpaa2_io_notification_ctx *nctx;
2721        struct dpaa2_eth_channel *channel;
2722        struct dpcon_notification_cfg dpcon_notif_cfg;
2723        struct device *dev = priv->net_dev->dev.parent;
2724        int i, err;
2725
2726        /* We want the ability to spread ingress traffic (RX, TX conf) to as
2727         * many cores as possible, so we need one channel for each core
2728         * (unless there's fewer queues than cores, in which case the extra
2729         * channels would be wasted).
2730         * Allocate one channel per core and register it to the core's
2731         * affine DPIO. If not enough channels are available for all cores
2732         * or if some cores don't have an affine DPIO, there will be no
2733         * ingress frame processing on those cores.
2734         */
2735        cpumask_clear(&priv->dpio_cpumask);
2736        for_each_online_cpu(i) {
2737                /* Try to allocate a channel */
2738                channel = dpaa2_eth_alloc_channel(priv);
2739                if (IS_ERR_OR_NULL(channel)) {
2740                        err = PTR_ERR_OR_ZERO(channel);
2741                        if (err != -EPROBE_DEFER)
2742                                dev_info(dev,
2743                                         "No affine channel for cpu %d and above\n", i);
2744                        goto err_alloc_ch;
2745                }
2746
2747                priv->channel[priv->num_channels] = channel;
2748
2749                nctx = &channel->nctx;
2750                nctx->is_cdan = 1;
2751                nctx->cb = dpaa2_eth_cdan_cb;
2752                nctx->id = channel->ch_id;
2753                nctx->desired_cpu = i;
2754
2755                /* Register the new context */
2756                channel->dpio = dpaa2_io_service_select(i);
2757                err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2758                if (err) {
2759                        dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2760                        /* If no affine DPIO for this core, there's probably
2761                         * none available for next cores either. Signal we want
2762                         * to retry later, in case the DPIO devices weren't
2763                         * probed yet.
2764                         */
2765                        err = -EPROBE_DEFER;
2766                        goto err_service_reg;
2767                }
2768
2769                /* Register DPCON notification with MC */
2770                dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2771                dpcon_notif_cfg.priority = 0;
2772                dpcon_notif_cfg.user_ctx = nctx->qman64;
2773                err = dpcon_set_notification(priv->mc_io, 0,
2774                                             channel->dpcon->mc_handle,
2775                                             &dpcon_notif_cfg);
2776                if (err) {
2777                        dev_err(dev, "dpcon_set_notification() failed\n");
2778                        goto err_set_cdan;
2779                }
2780
2781                /* If we managed to allocate a channel and also found an affine
2782                 * DPIO for this core, add it to the final mask
2783                 */
2784                cpumask_set_cpu(i, &priv->dpio_cpumask);
2785                priv->num_channels++;
2786
2787                /* Stop if we already have enough channels to accommodate all
2788                 * RX and TX conf queues
2789                 */
2790                if (priv->num_channels == priv->dpni_attrs.num_queues)
2791                        break;
2792        }
2793
2794        return 0;
2795
2796err_set_cdan:
2797        dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2798err_service_reg:
2799        dpaa2_eth_free_channel(priv, channel);
2800err_alloc_ch:
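            /* When deferring the probe, undo everything set up so far so it
             * can be redone cleanly on the next attempt
             */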
2801        if (err == -EPROBE_DEFER) {
2802                for (i = 0; i < priv->num_channels; i++) {
2803                        channel = priv->channel[i];
2804                        nctx = &channel->nctx;
2805                        dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2806                        dpaa2_eth_free_channel(priv, channel);
2807                }
2808                priv->num_channels = 0;
2809                return err;
2810        }
2811
2812        if (cpumask_empty(&priv->dpio_cpumask)) {
2813                dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2814                return -ENODEV;
2815        }
2816
2817        dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2818                 cpumask_pr_args(&priv->dpio_cpumask));
2819
2820        return 0;
2821}
2822
2823static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
2824{
2825        struct device *dev = priv->net_dev->dev.parent;
2826        struct dpaa2_eth_channel *ch;
2827        int i;
2828
2829        /* deregister CDAN notifications and free channels */
2830        for (i = 0; i < priv->num_channels; i++) {
2831                ch = priv->channel[i];
2832                dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2833                dpaa2_eth_free_channel(priv, ch);
2834        }
2835}
2836
2837static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
2838                                                              int cpu)
2839{
2840        struct device *dev = priv->net_dev->dev.parent;
2841        int i;
2842
2843        for (i = 0; i < priv->num_channels; i++)
2844                if (priv->channel[i]->nctx.desired_cpu == cpu)
2845                        return priv->channel[i];
2846
2847        /* We should never get here. Issue a warning and return
2848         * the first channel, because it's still better than nothing
2849         */
2850        dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2851
2852        return priv->channel[0];
2853}
2854
2855static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
2856{
2857        struct device *dev = priv->net_dev->dev.parent;
2858        struct dpaa2_eth_fq *fq;
2859        int rx_cpu, txc_cpu;
2860        int i;
2861
2862        /* For each FQ, pick one channel/CPU to deliver frames to.
2863         * This may well change at runtime, either through irqbalance or
2864         * through direct user intervention.
2865         */
2866        rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2867
2868        for (i = 0; i < priv->num_fqs; i++) {
2869                fq = &priv->fq[i];
2870                switch (fq->type) {
2871                case DPAA2_RX_FQ:
2872                case DPAA2_RX_ERR_FQ:
2873                        fq->target_cpu = rx_cpu;
2874                        rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2875                        if (rx_cpu >= nr_cpu_ids)
2876                                rx_cpu = cpumask_first(&priv->dpio_cpumask);
2877                        break;
2878                case DPAA2_TX_CONF_FQ:
2879                        fq->target_cpu = txc_cpu;
2880                        txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2881                        if (txc_cpu >= nr_cpu_ids)
2882                                txc_cpu = cpumask_first(&priv->dpio_cpumask);
2883                        break;
2884                default:
2885                        dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2886                }
2887                fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
2888        }
2889
2890        update_xps(priv);
2891}
2892
2893static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
2894{
2895        int i, j;
2896
2897        /* We have one TxConf FQ per Tx flow.
2898         * The number of Tx and Rx queues is the same.
2899         * Tx queues come first in the fq array.
2900         */
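            /* Resulting layout of priv->fq[], with Q = dpaa2_eth_queue_count()
             * and T = dpaa2_eth_tc_count():
             *   fq[0 .. Q-1]         Tx confirmation FQs, one per Tx flow
             *   fq[Q .. Q*(T+1)-1]   Rx FQs, grouped by traffic class
             *   fq[Q*(T+1)]          the single Rx error FQ
             */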
2901        for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2902                priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2903                priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2904                priv->fq[priv->num_fqs++].flowid = (u16)i;
2905        }
2906
2907        for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2908                for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2909                        priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2910                        priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2911                        priv->fq[priv->num_fqs].tc = (u8)j;
2912                        priv->fq[priv->num_fqs++].flowid = (u16)i;
2913                }
2914        }
2915
2916        /* We have exactly one Rx error queue per DPNI */
2917        priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
2918        priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
2919
2920        /* For each FQ, decide on which core to process incoming frames */
2921        dpaa2_eth_set_fq_affinity(priv);
2922}
2923
2924/* Allocate and configure one buffer pool for each interface */
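    /* The DPBP object comes from the fsl-mc bus allocator and is brought up
     * through the usual MC sequence: open -> reset -> enable -> query
     * attributes. The hardware buffer pool id (bpid) reported back is what
     * the rest of the driver uses when seeding Rx buffers into the pool.
     */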
2925static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
2926{
2927        int err;
2928        struct fsl_mc_device *dpbp_dev;
2929        struct device *dev = priv->net_dev->dev.parent;
2930        struct dpbp_attr dpbp_attrs;
2931
2932        err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2933                                     &dpbp_dev);
2934        if (err) {
2935                if (err == -ENXIO)
2936                        err = -EPROBE_DEFER;
2937                else
2938                        dev_err(dev, "DPBP device allocation failed\n");
2939                return err;
2940        }
2941
2942        priv->dpbp_dev = dpbp_dev;
2943
2944        err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2945                        &dpbp_dev->mc_handle);
2946        if (err) {
2947                dev_err(dev, "dpbp_open() failed\n");
2948                goto err_open;
2949        }
2950
2951        err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2952        if (err) {
2953                dev_err(dev, "dpbp_reset() failed\n");
2954                goto err_reset;
2955        }
2956
2957        err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2958        if (err) {
2959                dev_err(dev, "dpbp_enable() failed\n");
2960                goto err_enable;
2961        }
2962
2963        err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2964                                  &dpbp_attrs);
2965        if (err) {
2966                dev_err(dev, "dpbp_get_attributes() failed\n");
2967                goto err_get_attr;
2968        }
2969        priv->bpid = dpbp_attrs.bpid;
2970
2971        return 0;
2972
2973err_get_attr:
2974        dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2975err_enable:
2976err_reset:
2977        dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2978err_open:
2979        fsl_mc_object_free(dpbp_dev);
2980
2981        return err;
2982}
2983
2984static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
2985{
2986        dpaa2_eth_drain_pool(priv);
2987        dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2988        dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2989        fsl_mc_object_free(priv->dpbp_dev);
2990}
2991
2992static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
2993{
2994        struct device *dev = priv->net_dev->dev.parent;
2995        struct dpni_buffer_layout buf_layout = {0};
2996        u16 rx_buf_align;
2997        int err;
2998
2999        /* We need to check for WRIOP version 1.0.0, but depending on the MC
3000         * version, this number is not always provided correctly on rev1.
3001         * We need to check for both alternatives in this situation.
3002         */
3003        if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3004            priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3005                rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3006        else
3007                rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3008
3009        /* We need to ensure that the buffer size seen by WRIOP is a multiple
3010         * of 64 or 256 bytes depending on the WRIOP version.
3011         */
3012        priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3013
3014        /* tx buffer */
3015        buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3016        buf_layout.pass_timestamp = true;
3017        buf_layout.pass_frame_status = true;
3018        buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3019                             DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3020                             DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3021        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3022                                     DPNI_QUEUE_TX, &buf_layout);
3023        if (err) {
3024                dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3025                return err;
3026        }
3027
3028        /* tx-confirm buffer */
3029        buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3030                             DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3031        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3032                                     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3033        if (err) {
3034                dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3035                return err;
3036        }
3037
3038        /* Now that we've set our tx buffer layout, retrieve the minimum
3039         * required tx data offset.
3040         */
3041        err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3042                                      &priv->tx_data_offset);
3043        if (err) {
3044                dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3045                return err;
3046        }
3047
3048        if ((priv->tx_data_offset % 64) != 0)
3049                dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3050                         priv->tx_data_offset);
3051
3052        /* rx buffer */
3053        buf_layout.pass_frame_status = true;
3054        buf_layout.pass_parser_result = true;
3055        buf_layout.data_align = rx_buf_align;
3056        buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3057        buf_layout.private_data_size = 0;
3058        buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3059                             DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3060                             DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3061                             DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3062                             DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3063        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3064                                     DPNI_QUEUE_RX, &buf_layout);
3065        if (err) {
3066                dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3067                return err;
3068        }
3069
3070        return 0;
3071}
3072
3073#define DPNI_ENQUEUE_FQID_VER_MAJOR     7
3074#define DPNI_ENQUEUE_FQID_VER_MINOR     9
3075
3076static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3077                                       struct dpaa2_eth_fq *fq,
3078                                       struct dpaa2_fd *fd, u8 prio,
3079                                       u32 num_frames __always_unused,
3080                                       int *frames_enqueued)
3081{
3082        int err;
3083
3084        err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3085                                          priv->tx_qdid, prio,
3086                                          fq->tx_qdbin, fd);
3087        if (!err && frames_enqueued)
3088                *frames_enqueued = 1;
3089        return err;
3090}
3091
3092static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3093                                                struct dpaa2_eth_fq *fq,
3094                                                struct dpaa2_fd *fd,
3095                                                u8 prio, u32 num_frames,
3096                                                int *frames_enqueued)
3097{
3098        int err;
3099
3100        err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3101                                                   fq->tx_fqid[prio],
3102                                                   fd, num_frames);
3103
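            /* dpaa2_io_service_enqueue_multiple_fq() returns the number of
             * frames actually enqueued; zero means the QBMan portal could not
             * accept any, which we report as -EBUSY so the caller can retry.
             */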
3104        if (err == 0)
3105                return -EBUSY;
3106
3107        if (frames_enqueued)
3108                *frames_enqueued = err;
3109        return 0;
3110}
3111
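    /* DPNI API versions >= 7.9 let us enqueue directly (and in batches) to
     * the Tx frame queues by FQID; on older firmware we fall back to
     * QDID-based enqueue, one frame at a time.
     */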
3112static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3113{
3114        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3115                                   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3116                priv->enqueue = dpaa2_eth_enqueue_qd;
3117        else
3118                priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3119}
3120
3121static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3122{
3123        struct device *dev = priv->net_dev->dev.parent;
3124        struct dpni_link_cfg link_cfg = {0};
3125        int err;
3126
3127        /* Get the default link options so we don't override other flags */
3128        err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3129        if (err) {
3130                dev_err(dev, "dpni_get_link_cfg() failed\n");
3131                return err;
3132        }
3133
3134        /* By default, enable both Rx and Tx pause frames */
3135        link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3136        link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3137        err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3138        if (err) {
3139                dev_err(dev, "dpni_set_link_cfg() failed\n");
3140                return err;
3141        }
3142
3143        priv->link_state.options = link_cfg.options;
3144
3145        return 0;
3146}
3147
3148static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3149{
3150        struct dpni_queue_id qid = {0};
3151        struct dpaa2_eth_fq *fq;
3152        struct dpni_queue queue;
3153        int i, j, err;
3154
3155        /* We only use Tx FQIDs for FQID-based enqueue, so check
3156         * if DPNI version supports it before updating FQIDs
3157         */
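            /* Tx FQIDs can change when the DPNI gets connected to a different
             * endpoint, which is why this is also called from the
             * DPNI_IRQ_EVENT_ENDPOINT_CHANGED interrupt handler.
             */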
3158        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3159                                   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3160                return;
3161
3162        for (i = 0; i < priv->num_fqs; i++) {
3163                fq = &priv->fq[i];
3164                if (fq->type != DPAA2_TX_CONF_FQ)
3165                        continue;
3166                for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3167                        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3168                                             DPNI_QUEUE_TX, j, fq->flowid,
3169                                             &queue, &qid);
3170                        if (err)
3171                                goto out_err;
3172
3173                        fq->tx_fqid[j] = qid.fqid;
3174                        if (fq->tx_fqid[j] == 0)
3175                                goto out_err;
3176                }
3177        }
3178
3179        priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3180
3181        return;
3182
3183out_err:
3184        netdev_info(priv->net_dev,
3185                    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
3186        priv->enqueue = dpaa2_eth_enqueue_qd;
3187}
3188
3189/* Configure ingress classification based on VLAN PCP */
3190static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3191{
3192        struct device *dev = priv->net_dev->dev.parent;
3193        struct dpkg_profile_cfg kg_cfg = {0};
3194        struct dpni_qos_tbl_cfg qos_cfg = {0};
3195        struct dpni_rule_cfg key_params;
3196        void *dma_mem, *key, *mask;
3197        u8 key_size = 2;        /* VLAN TCI field */
3198        int i, pcp, err;
3199
3200        /* VLAN-based classification only makes sense if we have multiple
3201         * traffic classes.
3202         * Also, we need to extract just the 3-bit PCP field from the VLAN
3203         * header and we can only do that by using a mask
3204         */
3205        if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3206                dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3207                return -EOPNOTSUPP;
3208        }
3209
3210        dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3211        if (!dma_mem)
3212                return -ENOMEM;
3213
3214        kg_cfg.num_extracts = 1;
3215        kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3216        kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3217        kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3218        kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3219
3220        err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3221        if (err) {
3222                dev_err(dev, "dpni_prepare_key_cfg failed\n");
3223                goto out_free_tbl;
3224        }
3225
3226        /* set QoS table */
3227        qos_cfg.default_tc = 0;
3228        qos_cfg.discard_on_miss = 0;
3229        qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3230                                              DPAA2_CLASSIFIER_DMA_SIZE,
3231                                              DMA_TO_DEVICE);
3232        if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3233                dev_err(dev, "QoS table DMA mapping failed\n");
3234                err = -ENOMEM;
3235                goto out_free_tbl;
3236        }
3237
3238        err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3239        if (err) {
3240                dev_err(dev, "dpni_set_qos_table failed\n");
3241                goto out_unmap_tbl;
3242        }
3243
3244        /* Add QoS table entries */
3245        key = kzalloc(key_size * 2, GFP_KERNEL);
3246        if (!key) {
3247                err = -ENOMEM;
3248                goto out_unmap_tbl;
3249        }
3250        mask = key + key_size;
3251        *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3252
3253        key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3254                                             DMA_TO_DEVICE);
3255        if (dma_mapping_error(dev, key_params.key_iova)) {
3256                dev_err(dev, "QoS table entry DMA mapping failed\n");
3257                err = -ENOMEM;
3258                goto out_free_key;
3259        }
3260
3261        key_params.mask_iova = key_params.key_iova + key_size;
3262        key_params.key_size = key_size;
3263
3264        /* We add rules for PCP-based distribution starting with highest
3265         * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3266         * classes to accommodate all priority levels, the lowest ones end up
3267         * on TC 0 which was configured as default
3268         */
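            /* Example with 4 traffic classes: PCP 7 -> TC 3, PCP 6 -> TC 2,
             * PCP 5 -> TC 1, PCP 4 -> TC 0; PCP 3..0 match no entry and fall
             * through to the default TC 0.
             */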
3269        for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3270                *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3271                dma_sync_single_for_device(dev, key_params.key_iova,
3272                                           key_size * 2, DMA_TO_DEVICE);
3273
3274                err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3275                                         &key_params, i, i);
3276                if (err) {
3277                        dev_err(dev, "dpni_add_qos_entry failed\n");
3278                        dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3279                        goto out_unmap_key;
3280                }
3281        }
3282
3283        priv->vlan_cls_enabled = true;
3284
3285        /* Table and key memory is not persistent, clean everything up after
3286         * configuration is finished
3287         */
3288out_unmap_key:
3289        dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3290out_free_key:
3291        kfree(key);
3292out_unmap_tbl:
3293        dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3294                         DMA_TO_DEVICE);
3295out_free_tbl:
3296        kfree(dma_mem);
3297
3298        return err;
3299}
3300
3301/* Configure the DPNI object this interface is associated with */
3302static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3303{
3304        struct device *dev = &ls_dev->dev;
3305        struct dpaa2_eth_priv *priv;
3306        struct net_device *net_dev;
3307        int err;
3308
3309        net_dev = dev_get_drvdata(dev);
3310        priv = netdev_priv(net_dev);
3311
3312        /* get a handle for the DPNI object */
3313        err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3314        if (err) {
3315                dev_err(dev, "dpni_open() failed\n");
3316                return err;
3317        }
3318
3319        /* Check if we can work with this DPNI object */
3320        err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3321                                   &priv->dpni_ver_minor);
3322        if (err) {
3323                dev_err(dev, "dpni_get_api_version() failed\n");
3324                goto close;
3325        }
3326        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3327                dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3328                        priv->dpni_ver_major, priv->dpni_ver_minor,
3329                        DPNI_VER_MAJOR, DPNI_VER_MINOR);
3330                err = -ENOTSUPP;
3331                goto close;
3332        }
3333
3334        ls_dev->mc_io = priv->mc_io;
3335        ls_dev->mc_handle = priv->mc_token;
3336
3337        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3338        if (err) {
3339                dev_err(dev, "dpni_reset() failed\n");
3340                goto close;
3341        }
3342
3343        err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3344                                  &priv->dpni_attrs);
3345        if (err) {
3346                dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3347                goto close;
3348        }
3349
3350        err = dpaa2_eth_set_buffer_layout(priv);
3351        if (err)
3352                goto close;
3353
3354        dpaa2_eth_set_enqueue_mode(priv);
3355
3356        /* Enable pause frame support */
3357        if (dpaa2_eth_has_pause_support(priv)) {
3358                err = dpaa2_eth_set_pause(priv);
3359                if (err)
3360                        goto close;
3361        }
3362
3363        err = dpaa2_eth_set_vlan_qos(priv);
3364        if (err && err != -EOPNOTSUPP)
3365                goto close;
3366
3367        priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3368                                       sizeof(struct dpaa2_eth_cls_rule),
3369                                       GFP_KERNEL);
3370        if (!priv->cls_rules) {
3371                err = -ENOMEM;
3372                goto close;
3373        }
3374
3375        return 0;
3376
3377close:
3378        dpni_close(priv->mc_io, 0, priv->mc_token);
3379
3380        return err;
3381}
3382
3383static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3384{
3385        int err;
3386
3387        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3388        if (err)
3389                netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3390                            err);
3391
3392        dpni_close(priv->mc_io, 0, priv->mc_token);
3393}
3394
3395static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3396                                   struct dpaa2_eth_fq *fq)
3397{
3398        struct device *dev = priv->net_dev->dev.parent;
3399        struct dpni_queue queue;
3400        struct dpni_queue_id qid;
3401        int err;
3402
3403        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3404                             DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3405        if (err) {
3406                dev_err(dev, "dpni_get_queue(RX) failed\n");
3407                return err;
3408        }
3409
3410        fq->fqid = qid.fqid;
3411
3412        queue.destination.id = fq->channel->dpcon_id;
3413        queue.destination.type = DPNI_DEST_DPCON;
3414        queue.destination.priority = 1;
3415        queue.user_context = (u64)(uintptr_t)fq;
3416        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3417                             DPNI_QUEUE_RX, fq->tc, fq->flowid,
3418                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3419                             &queue);
3420        if (err) {
3421                dev_err(dev, "dpni_set_queue(RX) failed\n");
3422                return err;
3423        }
3424
3425        /* xdp_rxq setup: done only once per channel, on the traffic
3426         * class 0 Rx queue; skip the other traffic classes */
3427        if (fq->tc > 0)
3428                return 0;
3429
3430        err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3431                               fq->flowid, 0);
3432        if (err) {
3433                dev_err(dev, "xdp_rxq_info_reg failed\n");
3434                return err;
3435        }
3436
3437        err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3438                                         MEM_TYPE_PAGE_ORDER0, NULL);
3439        if (err) {
3440                dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3441                return err;
3442        }
3443
3444        return 0;
3445}
3446
3447static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3448                                   struct dpaa2_eth_fq *fq)
3449{
3450        struct device *dev = priv->net_dev->dev.parent;
3451        struct dpni_queue queue;
3452        struct dpni_queue_id qid;
3453        int i, err;
3454
3455        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3456                err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3457                                     DPNI_QUEUE_TX, i, fq->flowid,
3458                                     &queue, &qid);
3459                if (err) {
3460                        dev_err(dev, "dpni_get_queue(TX) failed\n");
3461                        return err;
3462                }
3463                fq->tx_fqid[i] = qid.fqid;
3464        }
3465
3466        /* All Tx queues belonging to the same flowid have the same qdbin */
3467        fq->tx_qdbin = qid.qdbin;
3468
3469        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3470                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3471                             &queue, &qid);
3472        if (err) {
3473                dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3474                return err;
3475        }
3476
3477        fq->fqid = qid.fqid;
3478
3479        queue.destination.id = fq->channel->dpcon_id;
3480        queue.destination.type = DPNI_DEST_DPCON;
3481        queue.destination.priority = 0;
3482        queue.user_context = (u64)(uintptr_t)fq;
3483        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3484                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3485                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3486                             &queue);
3487        if (err) {
3488                dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3489                return err;
3490        }
3491
3492        return 0;
3493}
3494
3495static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3496                             struct dpaa2_eth_fq *fq)
3497{
3498        struct device *dev = priv->net_dev->dev.parent;
3499        struct dpni_queue q = { { 0 } };
3500        struct dpni_queue_id qid;
3501        u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3502        int err;
3503
3504        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3505                             DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3506        if (err) {
3507                dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3508                return err;
3509        }
3510
3511        fq->fqid = qid.fqid;
3512
3513        q.destination.id = fq->channel->dpcon_id;
3514        q.destination.type = DPNI_DEST_DPCON;
3515        q.destination.priority = 1;
3516        q.user_context = (u64)(uintptr_t)fq;
3517        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3518                             DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3519        if (err) {
3520                dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3521                return err;
3522        }
3523
3524        return 0;
3525}
3526
3527/* Supported header fields for Rx hash distribution key */
3528static const struct dpaa2_eth_dist_fields dist_fields[] = {
3529        {
3530                /* L2 header */
3531                .rxnfc_field = RXH_L2DA,
3532                .cls_prot = NET_PROT_ETH,
3533                .cls_field = NH_FLD_ETH_DA,
3534                .id = DPAA2_ETH_DIST_ETHDST,
3535                .size = 6,
3536        }, {
3537                .cls_prot = NET_PROT_ETH,
3538                .cls_field = NH_FLD_ETH_SA,
3539                .id = DPAA2_ETH_DIST_ETHSRC,
3540                .size = 6,
3541        }, {
3542                /* This is the last ethertype field parsed:
3543                 * depending on frame format, it can be the MAC ethertype
3544                 * or the VLAN etype.
3545                 */
3546                .cls_prot = NET_PROT_ETH,
3547                .cls_field = NH_FLD_ETH_TYPE,
3548                .id = DPAA2_ETH_DIST_ETHTYPE,
3549                .size = 2,
3550        }, {
3551                /* VLAN header */
3552                .rxnfc_field = RXH_VLAN,
3553                .cls_prot = NET_PROT_VLAN,
3554                .cls_field = NH_FLD_VLAN_TCI,
3555                .id = DPAA2_ETH_DIST_VLAN,
3556                .size = 2,
3557        }, {
3558                /* IP header */
3559                .rxnfc_field = RXH_IP_SRC,
3560                .cls_prot = NET_PROT_IP,
3561                .cls_field = NH_FLD_IP_SRC,
3562                .id = DPAA2_ETH_DIST_IPSRC,
3563                .size = 4,
3564        }, {
3565                .rxnfc_field = RXH_IP_DST,
3566                .cls_prot = NET_PROT_IP,
3567                .cls_field = NH_FLD_IP_DST,
3568                .id = DPAA2_ETH_DIST_IPDST,
3569                .size = 4,
3570        }, {
3571                .rxnfc_field = RXH_L3_PROTO,
3572                .cls_prot = NET_PROT_IP,
3573                .cls_field = NH_FLD_IP_PROTO,
3574                .id = DPAA2_ETH_DIST_IPPROTO,
3575                .size = 1,
3576        }, {
3577                /* Using UDP ports, this is functionally equivalent to raw
3578                 * byte pairs from L4 header.
3579                 */
3580                .rxnfc_field = RXH_L4_B_0_1,
3581                .cls_prot = NET_PROT_UDP,
3582                .cls_field = NH_FLD_UDP_PORT_SRC,
3583                .id = DPAA2_ETH_DIST_L4SRC,
3584                .size = 2,
3585        }, {
3586                .rxnfc_field = RXH_L4_B_2_3,
3587                .cls_prot = NET_PROT_UDP,
3588                .cls_field = NH_FLD_UDP_PORT_DST,
3589                .id = DPAA2_ETH_DIST_L4DST,
3590                .size = 2,
3591        },
3592};
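    /* Note: the order of the entries above also defines the byte layout of
     * the Rx classification key: fields are concatenated in this order, which
     * is what dpaa2_eth_cls_fld_off() and dpaa2_eth_cls_trim_rule() below
     * rely on.
     */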
3593
3594/* Configure the Rx hash key using the legacy API */
3595static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3596{
3597        struct device *dev = priv->net_dev->dev.parent;
3598        struct dpni_rx_tc_dist_cfg dist_cfg;
3599        int i, err = 0;
3600
3601        memset(&dist_cfg, 0, sizeof(dist_cfg));
3602
3603        dist_cfg.key_cfg_iova = key;
3604        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3605        dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3606
3607        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3608                err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3609                                          i, &dist_cfg);
3610                if (err) {
3611                        dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3612                        break;
3613                }
3614        }
3615
3616        return err;
3617}
3618
3619/* Configure the Rx hash key using the new API */
3620static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3621{
3622        struct device *dev = priv->net_dev->dev.parent;
3623        struct dpni_rx_dist_cfg dist_cfg;
3624        int i, err = 0;
3625
3626        memset(&dist_cfg, 0, sizeof(dist_cfg));
3627
3628        dist_cfg.key_cfg_iova = key;
3629        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3630        dist_cfg.enable = 1;
3631
3632        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3633                dist_cfg.tc = i;
3634                err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3635                                            &dist_cfg);
3636                if (err) {
3637                        dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3638                        break;
3639                }
3640
3641                /* If the flow steering / hashing key is shared between all
3642                 * traffic classes, install it just once
3643                 */
3644                if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3645                        break;
3646        }
3647
3648        return err;
3649}
3650
3651/* Configure the Rx flow classification key */
3652static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3653{
3654        struct device *dev = priv->net_dev->dev.parent;
3655        struct dpni_rx_dist_cfg dist_cfg;
3656        int i, err = 0;
3657
3658        memset(&dist_cfg, 0, sizeof(dist_cfg));
3659
3660        dist_cfg.key_cfg_iova = key;
3661        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3662        dist_cfg.enable = 1;
3663
3664        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3665                dist_cfg.tc = i;
3666                err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3667                                          &dist_cfg);
3668                if (err) {
3669                        dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3670                        break;
3671                }
3672
3673                /* If the flow steering / hashing key is shared between all
3674                 * traffic classes, install it just once
3675                 */
3676                if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3677                        break;
3678        }
3679
3680        return err;
3681}
3682
3683/* Size of the Rx flow classification key */
3684int dpaa2_eth_cls_key_size(u64 fields)
3685{
3686        int i, size = 0;
3687
3688        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3689                if (!(fields & dist_fields[i].id))
3690                        continue;
3691                size += dist_fields[i].size;
3692        }
3693
3694        return size;
3695}
3696
3697/* Offset of header field in Rx classification key */
3698int dpaa2_eth_cls_fld_off(int prot, int field)
3699{
3700        int i, off = 0;
3701
3702        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3703                if (dist_fields[i].cls_prot == prot &&
3704                    dist_fields[i].cls_field == field)
3705                        return off;
3706                off += dist_fields[i].size;
3707        }
3708
3709        WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3710        return 0;
3711}
3712
3713/* Prune unused fields from the classification rule.
3714 * Used when masking is not supported
3715 */
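    /* E.g. if only the IP source and destination addresses are selected,
     * their values are copied from their natural offsets in the full key
     * layout to offsets 0 and 4 of the pruned rule.
     */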
3716void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
3717{
3718        int off = 0, new_off = 0;
3719        int i, size;
3720
3721        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3722                size = dist_fields[i].size;
3723                if (dist_fields[i].id & fields) {
3724                        memcpy(key_mem + new_off, key_mem + off, size);
3725                        new_off += size;
3726                }
3727                off += size;
3728        }
3729}
3730
3731/* Set Rx distribution (hash or flow classification) key.
3732 * @flags is a combination of DPAA2_ETH_DIST_* header field ids.
3733 */
3734static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
3735                                  enum dpaa2_eth_rx_dist type, u64 flags)
3736{
3737        struct device *dev = net_dev->dev.parent;
3738        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3739        struct dpkg_profile_cfg cls_cfg;
3740        u32 rx_hash_fields = 0;
3741        dma_addr_t key_iova;
3742        u8 *dma_mem;
3743        int i;
3744        int err = 0;
3745
3746        memset(&cls_cfg, 0, sizeof(cls_cfg));
3747
3748        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3749                struct dpkg_extract *key =
3750                        &cls_cfg.extracts[cls_cfg.num_extracts];
3751
3752                /* For both Rx hashing and classification keys
3753                 * we set only the selected fields.
3754                 */
3755                if (!(flags & dist_fields[i].id))
3756                        continue;
3757                if (type == DPAA2_ETH_RX_DIST_HASH)
3758                        rx_hash_fields |= dist_fields[i].rxnfc_field;
3759
3760                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3761                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
3762                        return -E2BIG;
3763                }
3764
3765                key->type = DPKG_EXTRACT_FROM_HDR;
3766                key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3767                key->extract.from_hdr.type = DPKG_FULL_FIELD;
3768                key->extract.from_hdr.field = dist_fields[i].cls_field;
3769                cls_cfg.num_extracts++;
3770        }
3771
3772        dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3773        if (!dma_mem)
3774                return -ENOMEM;
3775
3776        err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3777        if (err) {
3778                dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
3779                goto free_key;
3780        }
3781
3782        /* Prepare for setting the rx dist */
3783        key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3784                                  DMA_TO_DEVICE);
3785        if (dma_mapping_error(dev, key_iova)) {
3786                dev_err(dev, "DMA mapping failed\n");
3787                err = -ENOMEM;
3788                goto free_key;
3789        }
3790
3791        if (type == DPAA2_ETH_RX_DIST_HASH) {
3792                if (dpaa2_eth_has_legacy_dist(priv))
3793                        err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
3794                else
3795                        err = dpaa2_eth_config_hash_key(priv, key_iova);
3796        } else {
3797                err = dpaa2_eth_config_cls_key(priv, key_iova);
3798        }
3799
3800        dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3801                         DMA_TO_DEVICE);
3802        if (!err && type == DPAA2_ETH_RX_DIST_HASH)
3803                priv->rx_hash_fields = rx_hash_fields;
3804
3805free_key:
3806        kfree(dma_mem);
3807        return err;
3808}
3809
3810int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3811{
3812        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3813        u64 key = 0;
3814        int i;
3815
3816        if (!dpaa2_eth_hash_enabled(priv))
3817                return -EOPNOTSUPP;
3818
3819        for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3820                if (dist_fields[i].rxnfc_field & flags)
3821                        key |= dist_fields[i].id;
3822
3823        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
3824}
3825
3826int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3827{
3828        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3829}
3830
3831static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
3832{
3833        struct device *dev = priv->net_dev->dev.parent;
3834        int err;
3835
3836        /* Check if we actually support Rx flow classification */
3837        if (dpaa2_eth_has_legacy_dist(priv)) {
3838                dev_dbg(dev, "Rx cls not supported by current MC version\n");
3839                return -EOPNOTSUPP;
3840        }
3841
3842        if (!dpaa2_eth_fs_enabled(priv)) {
3843                dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3844                return -EOPNOTSUPP;
3845        }
3846
3847        if (!dpaa2_eth_hash_enabled(priv)) {
3848                dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3849                return -EOPNOTSUPP;
3850        }
3851
3852        /* If there is no support for masking in the classification table,
3853         * we don't set a default key, as it will depend on the rules
3854         * added by the user at runtime.
3855         */
3856        if (!dpaa2_eth_fs_mask_enabled(priv))
3857                goto out;
3858
3859        err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
3860        if (err)
3861                return err;
3862
3863out:
3864        priv->rx_cls_enabled = 1;
3865
3866        return 0;
3867}
3868
3869/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3870 * frame queues and channels
3871 */
3872static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
3873{
3874        struct net_device *net_dev = priv->net_dev;
3875        struct device *dev = net_dev->dev.parent;
3876        struct dpni_pools_cfg pools_params;
3877        struct dpni_error_cfg err_cfg;
3878        int err = 0;
3879        int i;
3880
3881        pools_params.num_dpbp = 1;
3882        pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3883        pools_params.pools[0].backup_pool = 0;
3884        pools_params.pools[0].buffer_size = priv->rx_buf_size;
3885        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3886        if (err) {
3887                dev_err(dev, "dpni_set_pools() failed\n");
3888                return err;
3889        }
3890
3891        /* have the interface implicitly distribute traffic based on
3892         * the default hash key
3893         */
3894        err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3895        if (err && err != -EOPNOTSUPP)
3896                dev_err(dev, "Failed to configure hashing\n");
3897
3898        /* Configure the flow classification key; it includes all
3899         * supported header fields and cannot be modified at runtime
3900         */
3901        err = dpaa2_eth_set_default_cls(priv);
3902        if (err && err != -EOPNOTSUPP)
3903                dev_err(dev, "Failed to configure Rx classification key\n");
3904
3905        /* Configure handling of error frames */
3906        err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3907        err_cfg.set_frame_annotation = 1;
3908        err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3909        err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3910                                       &err_cfg);
3911        if (err) {
3912                dev_err(dev, "dpni_set_errors_behavior failed\n");
3913                return err;
3914        }
3915
3916        /* Configure Rx and Tx conf queues to generate CDANs */
3917        for (i = 0; i < priv->num_fqs; i++) {
3918                switch (priv->fq[i].type) {
3919                case DPAA2_RX_FQ:
3920                        err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
3921                        break;
3922                case DPAA2_TX_CONF_FQ:
3923                        err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
3924                        break;
3925                case DPAA2_RX_ERR_FQ:
3926                        err = setup_rx_err_flow(priv, &priv->fq[i]);
3927                        break;
3928                default:
3929                        dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3930                        return -EINVAL;
3931                }
3932                if (err)
3933                        return err;
3934        }
3935
3936        err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3937                            DPNI_QUEUE_TX, &priv->tx_qdid);
3938        if (err) {
3939                dev_err(dev, "dpni_get_qdid() failed\n");
3940                return err;
3941        }
3942
3943        return 0;
3944}
3945
3946/* Allocate rings for storing incoming frame descriptors */
3947static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
3948{
3949        struct net_device *net_dev = priv->net_dev;
3950        struct device *dev = net_dev->dev.parent;
3951        int i;
3952
3953        for (i = 0; i < priv->num_channels; i++) {
3954                priv->channel[i]->store =
3955                        dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3956                if (!priv->channel[i]->store) {
3957                        netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3958                        goto err_ring;
3959                }
3960        }
3961
3962        return 0;
3963
3964err_ring:
3965        for (i = 0; i < priv->num_channels; i++) {
3966                if (!priv->channel[i]->store)
3967                        break;
3968                dpaa2_io_store_destroy(priv->channel[i]->store);
3969        }
3970
3971        return -ENOMEM;
3972}
3973
3974static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
3975{
3976        int i;
3977
3978        for (i = 0; i < priv->num_channels; i++)
3979                dpaa2_io_store_destroy(priv->channel[i]->store);
3980}
3981
3982static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
3983{
3984        struct net_device *net_dev = priv->net_dev;
3985        struct device *dev = net_dev->dev.parent;
3986        u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3987        int err;
3988
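            /* Precedence: a non-zero address provided by firmware/bootloader
             * on the port side wins over whatever the DPNI currently has;
             * only if both are all-zero do we generate a random address.
             */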
3989        /* Get firmware address, if any */
3990        err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3991        if (err) {
3992                dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3993                return err;
3994        }
3995
3996        /* Get the MAC address currently configured on the DPNI, if any */
3997        err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3998                                        dpni_mac_addr);
3999        if (err) {
4000                dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4001                return err;
4002        }
4003
4004        /* First check if firmware has any address configured by bootloader */
4005        if (!is_zero_ether_addr(mac_addr)) {
4006                /* If the DPMAC addr != DPNI addr, update it */
4007                if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4008                        err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4009                                                        priv->mc_token,
4010                                                        mac_addr);
4011                        if (err) {
4012                                dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4013                                return err;
4014                        }
4015                }
4016                memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
4017        } else if (is_zero_ether_addr(dpni_mac_addr)) {
4018                /* No MAC address configured, fill in net_dev->dev_addr
4019                 * with a random one
4020                 */
4021                eth_hw_addr_random(net_dev);
4022                dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4023
4024                err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4025                                                net_dev->dev_addr);
4026                if (err) {
4027                        dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4028                        return err;
4029                }
4030
4031                /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4032                 * practical purposes, this will be our "permanent" mac address,
4033                 * at least until the next reboot. This move will also permit
4034                 * register_netdevice() to properly fill up net_dev->perm_addr.
4035                 */
4036                net_dev->addr_assign_type = NET_ADDR_PERM;
4037        } else {
4038                /* NET_ADDR_PERM is default, all we have to do is
4039                 * fill in the device addr.
4040                 */
4041                memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
4042        }
4043
4044        return 0;
4045}
4046
4047static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4048{
4049        struct device *dev = net_dev->dev.parent;
4050        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4051        u32 options = priv->dpni_attrs.options;
4052        u64 supported = 0, not_supported = 0;
4053        u8 bcast_addr[ETH_ALEN];
4054        u8 num_queues;
4055        int err;
4056
4057        net_dev->netdev_ops = &dpaa2_eth_ops;
4058        net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4059
4060        err = dpaa2_eth_set_mac_addr(priv);
4061        if (err)
4062                return err;
4063
4064        /* Explicitly add the broadcast address to the MAC filtering table */
4065        eth_broadcast_addr(bcast_addr);
4066        err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4067        if (err) {
4068                dev_err(dev, "dpni_add_mac_addr() failed\n");
4069                return err;
4070        }
4071
4072        /* Set MTU upper limit; lower limit is 68B (default value) */
4073        net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4074        err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4075                                        DPAA2_ETH_MFL);
4076        if (err) {
4077                dev_err(dev, "dpni_set_max_frame_length() failed\n");
4078                return err;
4079        }
4080
4081        /* Set actual number of queues in the net device */
4082        num_queues = dpaa2_eth_queue_count(priv);
4083        err = netif_set_real_num_tx_queues(net_dev, num_queues);
4084        if (err) {
4085                dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4086                return err;
4087        }
4088        err = netif_set_real_num_rx_queues(net_dev, num_queues);
4089        if (err) {
4090                dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4091                return err;
4092        }
4093
4094        /* Capabilities listing */
4095        supported |= IFF_LIVE_ADDR_CHANGE;
4096
4097        if (options & DPNI_OPT_NO_MAC_FILTER)
4098                not_supported |= IFF_UNICAST_FLT;
4099        else
4100                supported |= IFF_UNICAST_FLT;
4101
4102        net_dev->priv_flags |= supported;
4103        net_dev->priv_flags &= ~not_supported;
4104
4105        /* Features */
4106        net_dev->features = NETIF_F_RXCSUM |
4107                            NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4108                            NETIF_F_SG | NETIF_F_HIGHDMA |
4109                            NETIF_F_LLTX | NETIF_F_HW_TC;
4110        net_dev->hw_features = net_dev->features;
4111
4112        if (priv->dpni_attrs.vlan_filter_entries)
4113                net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4114
4115        return 0;
4116}
4117
4118static int dpaa2_eth_poll_link_state(void *arg)
4119{
4120        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4121        int err;
4122
4123        while (!kthread_should_stop()) {
4124                err = dpaa2_eth_link_state_update(priv);
4125                if (unlikely(err))
4126                        return err;
4127
4128                msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4129        }
4130
4131        return 0;
4132}
4133
4134static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4135{
4136        struct fsl_mc_device *dpni_dev, *dpmac_dev;
4137        struct dpaa2_mac *mac;
4138        int err;
4139
4140        dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4141        dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4142
4143        if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4144                return PTR_ERR(dpmac_dev);
4145
4146        if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4147                return 0;
4148
4149        mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4150        if (!mac)
4151                return -ENOMEM;
4152
4153        mac->mc_dev = dpmac_dev;
4154        mac->mc_io = priv->mc_io;
4155        mac->net_dev = priv->net_dev;
4156
4157        err = dpaa2_mac_open(mac);
4158        if (err)
4159                goto err_free_mac;
4160        priv->mac = mac;
4161
4162        if (dpaa2_eth_is_type_phy(priv)) {
4163                err = dpaa2_mac_connect(mac);
4164                if (err && err != -EPROBE_DEFER)
4165                        netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
4166                                   ERR_PTR(err));
4167                if (err)
4168                        goto err_close_mac;
4169        }
4170
4171        return 0;
4172
4173err_close_mac:
4174        dpaa2_mac_close(mac);
4175        priv->mac = NULL;
4176err_free_mac:
4177        kfree(mac);
4178        return err;
4179}
4180
4181static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4182{
4183        if (dpaa2_eth_is_type_phy(priv))
4184                dpaa2_mac_disconnect(priv->mac);
4185
4186        if (!dpaa2_eth_has_mac(priv))
4187                return;
4188
4189        dpaa2_mac_close(priv->mac);
4190        kfree(priv->mac);
4191        priv->mac = NULL;
4192}
4193
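    /* DPNI interrupt handler. It runs in thread context because servicing
     * the interrupt can involve sleeping operations: MC commands, taking the
     * rtnl lock and (dis)connecting the MAC endpoint.
     */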
4194static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4195{
4196        u32 status = ~0;
4197        struct device *dev = (struct device *)arg;
4198        struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4199        struct net_device *net_dev = dev_get_drvdata(dev);
4200        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4201        int err;
4202
4203        err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4204                                  DPNI_IRQ_INDEX, &status);
4205        if (unlikely(err)) {
4206                netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4207                return IRQ_HANDLED;
4208        }
4209
4210        if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4211                dpaa2_eth_link_state_update(netdev_priv(net_dev));
4212
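            /* An endpoint change typically means the DPMAC at the other end
             * of our link was added or removed, so refresh the MAC address,
             * the Tx FQIDs and the MAC connection state.
             */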
4213        if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4214                dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4215                dpaa2_eth_update_tx_fqids(priv);
4216
4217                rtnl_lock();
4218                if (dpaa2_eth_has_mac(priv))
4219                        dpaa2_eth_disconnect_mac(priv);
4220                else
4221                        dpaa2_eth_connect_mac(priv);
4222                rtnl_unlock();
4223        }
4224
4225        return IRQ_HANDLED;
4226}
4227
4228static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4229{
4230        int err = 0;
4231        struct fsl_mc_device_irq *irq;
4232
4233        err = fsl_mc_allocate_irqs(ls_dev);
4234        if (err) {
4235                dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4236                return err;
4237        }
4238
4239        irq = ls_dev->irqs[0];
4240        err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
4241                                        NULL, dpni_irq0_handler_thread,
4242                                        IRQF_NO_SUSPEND | IRQF_ONESHOT,
4243                                        dev_name(&ls_dev->dev), &ls_dev->dev);
4244        if (err < 0) {
4245                dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4246                goto free_mc_irq;
4247        }
4248
4249        err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4250                                DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4251                                DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4252        if (err < 0) {
4253                dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4254                goto free_irq;
4255        }
4256
4257        err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4258                                  DPNI_IRQ_INDEX, 1);
4259        if (err < 0) {
4260                dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4261                goto free_irq;
4262        }
4263
4264        return 0;
4265
4266free_irq:
4267        devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
4268free_mc_irq:
4269        fsl_mc_free_irqs(ls_dev);
4270
4271        return err;
4272}
4273
4274static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4275{
4276        int i;
4277        struct dpaa2_eth_channel *ch;
4278
4279        for (i = 0; i < priv->num_channels; i++) {
4280                ch = priv->channel[i];
4281                /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4282                netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4283                               NAPI_POLL_WEIGHT);
4284        }
4285}
4286
4287static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4288{
4289        int i;
4290        struct dpaa2_eth_channel *ch;
4291
4292        for (i = 0; i < priv->num_channels; i++) {
4293                ch = priv->channel[i];
4294                netif_napi_del(&ch->napi);
4295        }
4296}
4297
4298static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4299{
4300        struct device *dev;
4301        struct net_device *net_dev = NULL;
4302        struct dpaa2_eth_priv *priv = NULL;
4303        int err = 0;
4304
4305        dev = &dpni_dev->dev;
4306
4307        /* Net device */
4308        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4309        if (!net_dev) {
4310                dev_err(dev, "alloc_etherdev_mq() failed\n");
4311                return -ENOMEM;
4312        }
4313
4314        SET_NETDEV_DEV(net_dev, dev);
4315        dev_set_drvdata(dev, net_dev);
4316
4317        priv = netdev_priv(net_dev);
4318        priv->net_dev = net_dev;
4319
4320        priv->iommu_domain = iommu_get_domain_for_dev(dev);
4321
4322        priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4323        priv->rx_tstamp = false;
4324
4325        priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4326        if (!priv->dpaa2_ptp_wq) {
4327                err = -ENOMEM;
4328                goto err_wq_alloc;
4329        }
4330
4331        INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4332
4333        skb_queue_head_init(&priv->tx_skbs);
4334
4335        priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4336
4337        /* Obtain a MC portal */
4338        err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4339                                     &priv->mc_io);
4340        if (err) {
4341                if (err == -ENXIO)
4342                        err = -EPROBE_DEFER;
4343                else
4344                        dev_err(dev, "MC portal allocation failed\n");
4345                goto err_portal_alloc;
4346        }
4347
4348        /* MC objects initialization and configuration */
4349        err = dpaa2_eth_setup_dpni(dpni_dev);
4350        if (err)
4351                goto err_dpni_setup;
4352
4353        err = dpaa2_eth_setup_dpio(priv);
4354        if (err)
4355                goto err_dpio_setup;
4356
4357        dpaa2_eth_setup_fqs(priv);
4358
4359        err = dpaa2_eth_setup_dpbp(priv);
4360        if (err)
4361                goto err_dpbp_setup;
4362
4363        err = dpaa2_eth_bind_dpni(priv);
4364        if (err)
4365                goto err_bind;
4366
4367        /* Add a NAPI context for each channel */
4368        dpaa2_eth_add_ch_napi(priv);
4369
4370        /* Percpu statistics */
4371        priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4372        if (!priv->percpu_stats) {
4373                dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4374                err = -ENOMEM;
4375                goto err_alloc_percpu_stats;
4376        }
4377        priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4378        if (!priv->percpu_extras) {
4379                dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4380                err = -ENOMEM;
4381                goto err_alloc_percpu_extras;
4382        }
4383
4384        priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4385        if (!priv->sgt_cache) {
4386                dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4387                err = -ENOMEM;
4388                goto err_alloc_sgt_cache;
4389        }
4390
4391        err = dpaa2_eth_netdev_init(net_dev);
4392        if (err)
4393                goto err_netdev_init;
4394
4395        /* Configure checksum offload based on current interface flags */
4396        err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4397        if (err)
4398                goto err_csum;
4399
4400        err = dpaa2_eth_set_tx_csum(priv,
4401                                    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4402        if (err)
4403                goto err_csum;
4404
4405        err = dpaa2_eth_alloc_rings(priv);
4406        if (err)
4407                goto err_alloc_rings;
4408
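/* DCB/PFC support is only advertised when the DPNI supports pause frames
 * and VLAN-based classification is enabled.
 */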
4409#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4410        if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4411                priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4412                net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4413        } else {
4414                dev_dbg(dev, "PFC not supported\n");
4415        }
4416#endif
4417
4418        err = dpaa2_eth_setup_irqs(dpni_dev);
4419        if (err) {
4420                netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4421                priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4422                                                "%s_poll_link", net_dev->name);
4423                if (IS_ERR(priv->poll_thread)) {
4424                        dev_err(dev, "Error starting polling thread\n");
4425                        goto err_poll_thread;
4426                }
4427                priv->do_link_poll = true;
4428        }
4429
4430        err = dpaa2_eth_connect_mac(priv);
4431        if (err)
4432                goto err_connect_mac;
4433
4434        err = dpaa2_eth_dl_register(priv);
4435        if (err)
4436                goto err_dl_register;
4437
4438        err = dpaa2_eth_dl_traps_register(priv);
4439        if (err)
4440                goto err_dl_trap_register;
4441
4442        err = dpaa2_eth_dl_port_add(priv);
4443        if (err)
4444                goto err_dl_port_add;
4445
4446        err = register_netdev(net_dev);
4447        if (err < 0) {
4448                dev_err(dev, "register_netdev() failed\n");
4449                goto err_netdev_reg;
4450        }
4451
4452#ifdef CONFIG_DEBUG_FS
4453        dpaa2_dbg_add(priv);
4454#endif
4455
4456        dev_info(dev, "Probed interface %s\n", net_dev->name);
4457        return 0;
4458
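/* Error unwind: the labels below release resources in reverse order of
 * acquisition, so each goto target only frees what was already set up.
 */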
4459err_netdev_reg:
4460        dpaa2_eth_dl_port_del(priv);
4461err_dl_port_add:
4462        dpaa2_eth_dl_traps_unregister(priv);
4463err_dl_trap_register:
4464        dpaa2_eth_dl_unregister(priv);
4465err_dl_register:
4466        dpaa2_eth_disconnect_mac(priv);
4467err_connect_mac:
4468        if (priv->do_link_poll)
4469                kthread_stop(priv->poll_thread);
4470        else
4471                fsl_mc_free_irqs(dpni_dev);
4472err_poll_thread:
4473        dpaa2_eth_free_rings(priv);
4474err_alloc_rings:
4475err_csum:
4476err_netdev_init:
4477        free_percpu(priv->sgt_cache);
4478err_alloc_sgt_cache:
4479        free_percpu(priv->percpu_extras);
4480err_alloc_percpu_extras:
4481        free_percpu(priv->percpu_stats);
4482err_alloc_percpu_stats:
4483        dpaa2_eth_del_ch_napi(priv);
4484err_bind:
4485        dpaa2_eth_free_dpbp(priv);
4486err_dpbp_setup:
4487        dpaa2_eth_free_dpio(priv);
4488err_dpio_setup:
4489        dpaa2_eth_free_dpni(priv);
4490err_dpni_setup:
4491        fsl_mc_portal_free(priv->mc_io);
4492err_portal_alloc:
4493        destroy_workqueue(priv->dpaa2_ptp_wq);
4494err_wq_alloc:
4495        dev_set_drvdata(dev, NULL);
4496        free_netdev(net_dev);
4497
4498        return err;
4499}
4500
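/* Teardown mirrors probe in reverse: remove debugfs entries, disconnect the
 * MAC under rtnl, unregister the netdev and devlink objects, stop link
 * polling (or free the IRQs), then release rings, per-CPU data, NAPI
 * contexts, MC objects and the MC portal before freeing the net_device.
 */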
4501static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4502{
4503        struct device *dev;
4504        struct net_device *net_dev;
4505        struct dpaa2_eth_priv *priv;
4506
4507        dev = &ls_dev->dev;
4508        net_dev = dev_get_drvdata(dev);
4509        priv = netdev_priv(net_dev);
4510
4511#ifdef CONFIG_DEBUG_FS
4512        dpaa2_dbg_remove(priv);
4513#endif
4514        rtnl_lock();
4515        dpaa2_eth_disconnect_mac(priv);
4516        rtnl_unlock();
4517
4518        unregister_netdev(net_dev);
4519
4520        dpaa2_eth_dl_port_del(priv);
4521        dpaa2_eth_dl_traps_unregister(priv);
4522        dpaa2_eth_dl_unregister(priv);
4523
4524        if (priv->do_link_poll)
4525                kthread_stop(priv->poll_thread);
4526        else
4527                fsl_mc_free_irqs(ls_dev);
4528
4529        dpaa2_eth_free_rings(priv);
4530        free_percpu(priv->sgt_cache);
4531        free_percpu(priv->percpu_stats);
4532        free_percpu(priv->percpu_extras);
4533
4534        dpaa2_eth_del_ch_napi(priv);
4535        dpaa2_eth_free_dpbp(priv);
4536        dpaa2_eth_free_dpio(priv);
4537        dpaa2_eth_free_dpni(priv);
4538
4539        fsl_mc_portal_free(priv->mc_io);
4540
4541        dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4542
4543        free_netdev(net_dev);
4544
4545        return 0;
4546}
4547
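/* Bind to DPNI (network interface) objects enumerated on the fsl-mc bus. */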
4548static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4549        {
4550                .vendor = FSL_MC_VENDOR_FREESCALE,
4551                .obj_type = "dpni",
4552        },
4553        { .vendor = 0x0 }
4554};
4555MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4556
4557static struct fsl_mc_driver dpaa2_eth_driver = {
4558        .driver = {
4559                .name = KBUILD_MODNAME,
4560                .owner = THIS_MODULE,
4561        },
4562        .probe = dpaa2_eth_probe,
4563        .remove = dpaa2_eth_remove,
4564        .match_id_table = dpaa2_eth_match_id_table
4565};
4566
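/* Set up debugfs support before registering with the fsl-mc bus; it is torn
 * down again if driver registration fails.
 */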
4567static int __init dpaa2_eth_driver_init(void)
4568{
4569        int err;
4570
4571        dpaa2_eth_dbg_init();
4572        err = fsl_mc_driver_register(&dpaa2_eth_driver);
4573        if (err) {
4574                dpaa2_eth_dbg_exit();
4575                return err;
4576        }
4577
4578        return 0;
4579}
4580
4581static void __exit dpaa2_eth_driver_exit(void)
4582{
4583        dpaa2_eth_dbg_exit();
4584        fsl_mc_driver_unregister(&dpaa2_eth_driver);
4585}
4586
4587module_init(dpaa2_eth_driver_init);
4588module_exit(dpaa2_eth_driver_exit);
4589