linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/* Copyright 2014-2016 Freescale Semiconductor Inc.
   3 * Copyright 2016-2017 NXP
   4 */
   5#include <linux/init.h>
   6#include <linux/module.h>
   7#include <linux/platform_device.h>
   8#include <linux/etherdevice.h>
   9#include <linux/of_net.h>
  10#include <linux/interrupt.h>
  11#include <linux/msi.h>
  12#include <linux/kthread.h>
  13#include <linux/iommu.h>
  14#include <linux/net_tstamp.h>
  15#include <linux/fsl/mc.h>
  16#include <linux/bpf.h>
  17#include <linux/bpf_trace.h>
  18#include <net/sock.h>
  19
  20#include "dpaa2-eth.h"
  21
   22/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
   23 * using these trace events need only #include "dpaa2-eth-trace.h"
   24 */
  25#define CREATE_TRACE_POINTS
  26#include "dpaa2-eth-trace.h"
  27
  28MODULE_LICENSE("Dual BSD/GPL");
  29MODULE_AUTHOR("Freescale Semiconductor, Inc");
  30MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
  31
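     /* Translate an IOVA taken from a frame descriptor into a CPU virtual
      * address. If an IOMMU domain is present, resolve the IOVA through it;
      * otherwise the address is already a physical one.
      */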
  32static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
  33                                dma_addr_t iova_addr)
  34{
  35        phys_addr_t phys_addr;
  36
  37        phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
  38
  39        return phys_to_virt(phys_addr);
  40}
  41
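     /* Propagate the hardware Rx checksum validation result (carried in the
      * frame annotation status word) into the skb
      */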
  42static void validate_rx_csum(struct dpaa2_eth_priv *priv,
  43                             u32 fd_status,
  44                             struct sk_buff *skb)
  45{
  46        skb_checksum_none_assert(skb);
  47
  48        /* HW checksum validation is disabled, nothing to do here */
  49        if (!(priv->net_dev->features & NETIF_F_RXCSUM))
  50                return;
  51
  52        /* Read checksum validation bits */
  53        if (!((fd_status & DPAA2_FAS_L3CV) &&
  54              (fd_status & DPAA2_FAS_L4CV)))
  55                return;
  56
  57        /* Inform the stack there's no need to compute L3/L4 csum anymore */
  58        skb->ip_summed = CHECKSUM_UNNECESSARY;
  59}
  60
  61/* Free a received FD.
  62 * Not to be used for Tx conf FDs or on any other paths.
  63 */
  64static void free_rx_fd(struct dpaa2_eth_priv *priv,
  65                       const struct dpaa2_fd *fd,
  66                       void *vaddr)
  67{
  68        struct device *dev = priv->net_dev->dev.parent;
  69        dma_addr_t addr = dpaa2_fd_get_addr(fd);
  70        u8 fd_format = dpaa2_fd_get_format(fd);
  71        struct dpaa2_sg_entry *sgt;
  72        void *sg_vaddr;
  73        int i;
  74
  75        /* If single buffer frame, just free the data buffer */
  76        if (fd_format == dpaa2_fd_single)
  77                goto free_buf;
  78        else if (fd_format != dpaa2_fd_sg)
  79                /* We don't support any other format */
  80                return;
  81
  82        /* For S/G frames, we first need to free all SG entries
  83         * except the first one, which was taken care of already
  84         */
  85        sgt = vaddr + dpaa2_fd_get_offset(fd);
  86        for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
  87                addr = dpaa2_sg_get_addr(&sgt[i]);
  88                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
  89                dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
  90                               DMA_BIDIRECTIONAL);
  91
  92                free_pages((unsigned long)sg_vaddr, 0);
  93                if (dpaa2_sg_is_final(&sgt[i]))
  94                        break;
  95        }
  96
  97free_buf:
  98        free_pages((unsigned long)vaddr, 0);
  99}
 100
 101/* Build a linear skb based on a single-buffer frame descriptor */
 102static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
 103                                        const struct dpaa2_fd *fd,
 104                                        void *fd_vaddr)
 105{
 106        struct sk_buff *skb = NULL;
 107        u16 fd_offset = dpaa2_fd_get_offset(fd);
 108        u32 fd_length = dpaa2_fd_get_len(fd);
 109
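             /* The data buffer is handed over to the skb, so it no longer
              * counts towards the channel's buffer pool total
              */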
 110        ch->buf_count--;
 111
 112        skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 113        if (unlikely(!skb))
 114                return NULL;
 115
 116        skb_reserve(skb, fd_offset);
 117        skb_put(skb, fd_length);
 118
 119        return skb;
 120}
 121
 122/* Build a non linear (fragmented) skb based on a S/G table */
 123static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 124                                      struct dpaa2_eth_channel *ch,
 125                                      struct dpaa2_sg_entry *sgt)
 126{
 127        struct sk_buff *skb = NULL;
 128        struct device *dev = priv->net_dev->dev.parent;
 129        void *sg_vaddr;
 130        dma_addr_t sg_addr;
 131        u16 sg_offset;
 132        u32 sg_length;
 133        struct page *page, *head_page;
 134        int page_offset;
 135        int i;
 136
 137        for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 138                struct dpaa2_sg_entry *sge = &sgt[i];
 139
 140                /* NOTE: We only support SG entries in dpaa2_sg_single format,
 141                 * but this is the only format we may receive from HW anyway
 142                 */
 143
 144                /* Get the address and length from the S/G entry */
 145                sg_addr = dpaa2_sg_get_addr(sge);
 146                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
 147                dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
 148                               DMA_BIDIRECTIONAL);
 149
 150                sg_length = dpaa2_sg_get_len(sge);
 151
 152                if (i == 0) {
 153                        /* We build the skb around the first data buffer */
 154                        skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 155                        if (unlikely(!skb)) {
 156                                /* Free the first SG entry now, since we already
 157                                 * unmapped it and obtained the virtual address
 158                                 */
 159                                free_pages((unsigned long)sg_vaddr, 0);
 160
 161                                /* We still need to subtract the buffers used
 162                                 * by this FD from our software counter
 163                                 */
  164                                while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
  165                                       !dpaa2_sg_is_final(&sgt[i]))
 166                                        i++;
 167                                break;
 168                        }
 169
 170                        sg_offset = dpaa2_sg_get_offset(sge);
 171                        skb_reserve(skb, sg_offset);
 172                        skb_put(skb, sg_length);
 173                } else {
 174                        /* Rest of the data buffers are stored as skb frags */
 175                        page = virt_to_page(sg_vaddr);
 176                        head_page = virt_to_head_page(sg_vaddr);
 177
 178                        /* Offset in page (which may be compound).
 179                         * Data in subsequent SG entries is stored from the
 180                         * beginning of the buffer, so we don't need to add the
 181                         * sg_offset.
 182                         */
 183                        page_offset = ((unsigned long)sg_vaddr &
 184                                (PAGE_SIZE - 1)) +
 185                                (page_address(page) - page_address(head_page));
 186
 187                        skb_add_rx_frag(skb, i - 1, head_page, page_offset,
 188                                        sg_length, DPAA2_ETH_RX_BUF_SIZE);
 189                }
 190
 191                if (dpaa2_sg_is_final(sge))
 192                        break;
 193        }
 194
 195        WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
 196
 197        /* Count all data buffers + SG table buffer */
 198        ch->buf_count -= i + 2;
 199
 200        return skb;
 201}
 202
 203/* Free buffers acquired from the buffer pool or which were meant to
 204 * be released in the pool
 205 */
 206static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 207{
 208        struct device *dev = priv->net_dev->dev.parent;
 209        void *vaddr;
 210        int i;
 211
 212        for (i = 0; i < count; i++) {
 213                vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
 214                dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
 215                               DMA_BIDIRECTIONAL);
 216                free_pages((unsigned long)vaddr, 0);
 217        }
 218}
 219
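     /* Recycle a buffer no longer needed on the XDP path back into the buffer
      * pool. Buffers are accumulated per channel and released in batches of
      * DPAA2_ETH_BUFS_PER_CMD; if the release command ultimately fails, they
      * are freed instead.
      */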
 220static void xdp_release_buf(struct dpaa2_eth_priv *priv,
 221                            struct dpaa2_eth_channel *ch,
 222                            dma_addr_t addr)
 223{
 224        int err;
 225
 226        ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
 227        if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
 228                return;
 229
 230        while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
 231                                               ch->xdp.drop_bufs,
 232                                               ch->xdp.drop_cnt)) == -EBUSY)
 233                cpu_relax();
 234
 235        if (err) {
 236                free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
 237                ch->buf_count -= ch->xdp.drop_cnt;
 238        }
 239
 240        ch->xdp.drop_cnt = 0;
 241}
 242
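     /* Transmit a frame on behalf of XDP_TX, reusing the received buffer in
      * place. The frame annotation tells hardware to release the buffer back
      * into the pool once transmission completes, so no Tx confirmation frame
      * is generated for it.
      */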
 243static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
 244                       void *buf_start, u16 queue_id)
 245{
 246        struct dpaa2_eth_fq *fq;
 247        struct dpaa2_faead *faead;
 248        u32 ctrl, frc;
 249        int i, err;
 250
 251        /* Mark the egress frame hardware annotation area as valid */
 252        frc = dpaa2_fd_get_frc(fd);
 253        dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
 254        dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
 255
 256        /* Instruct hardware to release the FD buffer directly into
 257         * the buffer pool once transmission is completed, instead of
 258         * sending a Tx confirmation frame to us
 259         */
 260        ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
 261        faead = dpaa2_get_faead(buf_start, false);
 262        faead->ctrl = cpu_to_le32(ctrl);
 263        faead->conf_fqid = 0;
 264
 265        fq = &priv->fq[queue_id];
 266        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
 267                err = priv->enqueue(priv, fq, fd, 0);
 268                if (err != -EBUSY)
 269                        break;
 270        }
 271
 272        return err;
 273}
 274
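     /* Run the XDP program attached to the channel, if any, on a received
      * frame and carry out its verdict. Returns the XDP action, so the caller
      * knows whether the frame still needs regular Rx processing.
      */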
 275static u32 run_xdp(struct dpaa2_eth_priv *priv,
 276                   struct dpaa2_eth_channel *ch,
 277                   struct dpaa2_eth_fq *rx_fq,
 278                   struct dpaa2_fd *fd, void *vaddr)
 279{
 280        dma_addr_t addr = dpaa2_fd_get_addr(fd);
 281        struct rtnl_link_stats64 *percpu_stats;
 282        struct bpf_prog *xdp_prog;
 283        struct xdp_buff xdp;
 284        u32 xdp_act = XDP_PASS;
 285        int err;
 286
 287        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 288
 289        rcu_read_lock();
 290
 291        xdp_prog = READ_ONCE(ch->xdp.prog);
 292        if (!xdp_prog)
 293                goto out;
 294
 295        xdp.data = vaddr + dpaa2_fd_get_offset(fd);
 296        xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
 297        xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 298        xdp_set_data_meta_invalid(&xdp);
 299        xdp.rxq = &ch->xdp_rxq;
 300
 301        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 302
 303        /* xdp.data pointer may have changed */
 304        dpaa2_fd_set_offset(fd, xdp.data - vaddr);
 305        dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
 306
 307        switch (xdp_act) {
 308        case XDP_PASS:
 309                break;
 310        case XDP_TX:
 311                err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
 312                if (err) {
 313                        xdp_release_buf(priv, ch, addr);
 314                        percpu_stats->tx_errors++;
 315                        ch->stats.xdp_tx_err++;
 316                } else {
 317                        percpu_stats->tx_packets++;
 318                        percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
 319                        ch->stats.xdp_tx++;
 320                }
 321                break;
 322        default:
 323                bpf_warn_invalid_xdp_action(xdp_act);
 324                /* fall through */
 325        case XDP_ABORTED:
 326                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
 327                /* fall through */
 328        case XDP_DROP:
 329                xdp_release_buf(priv, ch, addr);
 330                ch->stats.xdp_drop++;
 331                break;
 332        case XDP_REDIRECT:
 333                dma_unmap_page(priv->net_dev->dev.parent, addr,
 334                               DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
 335                ch->buf_count--;
 336                xdp.data_hard_start = vaddr;
 337                err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
 338                if (unlikely(err))
 339                        ch->stats.xdp_drop++;
 340                else
 341                        ch->stats.xdp_redirect++;
 342                break;
 343        }
 344
 345        ch->xdp.res |= xdp_act;
 346out:
 347        rcu_read_unlock();
 348        return xdp_act;
 349}
 350
 351/* Main Rx frame processing routine */
 352static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 353                         struct dpaa2_eth_channel *ch,
 354                         const struct dpaa2_fd *fd,
 355                         struct dpaa2_eth_fq *fq)
 356{
 357        dma_addr_t addr = dpaa2_fd_get_addr(fd);
 358        u8 fd_format = dpaa2_fd_get_format(fd);
 359        void *vaddr;
 360        struct sk_buff *skb;
 361        struct rtnl_link_stats64 *percpu_stats;
 362        struct dpaa2_eth_drv_stats *percpu_extras;
 363        struct device *dev = priv->net_dev->dev.parent;
 364        struct dpaa2_fas *fas;
 365        void *buf_data;
 366        u32 status = 0;
 367        u32 xdp_act;
 368
 369        /* Tracing point */
 370        trace_dpaa2_rx_fd(priv->net_dev, fd);
 371
 372        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 373        dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
 374                                DMA_BIDIRECTIONAL);
 375
 376        fas = dpaa2_get_fas(vaddr, false);
 377        prefetch(fas);
 378        buf_data = vaddr + dpaa2_fd_get_offset(fd);
 379        prefetch(buf_data);
 380
 381        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 382        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 383
 384        if (fd_format == dpaa2_fd_single) {
 385                xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
 386                if (xdp_act != XDP_PASS) {
 387                        percpu_stats->rx_packets++;
 388                        percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 389                        return;
 390                }
 391
 392                dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
 393                               DMA_BIDIRECTIONAL);
 394                skb = build_linear_skb(ch, fd, vaddr);
 395        } else if (fd_format == dpaa2_fd_sg) {
 396                WARN_ON(priv->xdp_prog);
 397
 398                dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
 399                               DMA_BIDIRECTIONAL);
 400                skb = build_frag_skb(priv, ch, buf_data);
 401                free_pages((unsigned long)vaddr, 0);
 402                percpu_extras->rx_sg_frames++;
 403                percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 404        } else {
 405                /* We don't support any other format */
 406                goto err_frame_format;
 407        }
 408
 409        if (unlikely(!skb))
 410                goto err_build_skb;
 411
 412        prefetch(skb->data);
 413
 414        /* Get the timestamp value */
 415        if (priv->rx_tstamp) {
 416                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 417                __le64 *ts = dpaa2_get_ts(vaddr, false);
 418                u64 ns;
 419
 420                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 421
 422                ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
 423                shhwtstamps->hwtstamp = ns_to_ktime(ns);
 424        }
 425
 426        /* Check if we need to validate the L4 csum */
 427        if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
 428                status = le32_to_cpu(fas->status);
 429                validate_rx_csum(priv, status, skb);
 430        }
 431
 432        skb->protocol = eth_type_trans(skb, priv->net_dev);
 433        skb_record_rx_queue(skb, fq->flowid);
 434
 435        percpu_stats->rx_packets++;
 436        percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 437
 438        list_add_tail(&skb->list, ch->rx_list);
 439
 440        return;
 441
 442err_build_skb:
 443        free_rx_fd(priv, fd, vaddr);
 444err_frame_format:
 445        percpu_stats->rx_dropped++;
 446}
 447
 448/* Consume all frames pull-dequeued into the store. This is the simplest way to
 449 * make sure we don't accidentally issue another volatile dequeue which would
 450 * overwrite (leak) frames already in the store.
 451 *
 452 * Observance of NAPI budget is not our concern, leaving that to the caller.
 453 */
 454static int consume_frames(struct dpaa2_eth_channel *ch,
 455                          struct dpaa2_eth_fq **src)
 456{
 457        struct dpaa2_eth_priv *priv = ch->priv;
 458        struct dpaa2_eth_fq *fq = NULL;
 459        struct dpaa2_dq *dq;
 460        const struct dpaa2_fd *fd;
 461        int cleaned = 0;
 462        int is_last;
 463
 464        do {
 465                dq = dpaa2_io_store_next(ch->store, &is_last);
 466                if (unlikely(!dq)) {
 467                        /* If we're here, we *must* have placed a
  468                         * volatile dequeue command, so keep reading through
 469                         * the store until we get some sort of valid response
 470                         * token (either a valid frame or an "empty dequeue")
 471                         */
 472                        continue;
 473                }
 474
 475                fd = dpaa2_dq_fd(dq);
 476                fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
 477
 478                fq->consume(priv, ch, fd, fq);
 479                cleaned++;
 480        } while (!is_last);
 481
 482        if (!cleaned)
 483                return 0;
 484
 485        fq->stats.frames += cleaned;
 486
 487        /* A dequeue operation only pulls frames from a single queue
 488         * into the store. Return the frame queue as an out param.
 489         */
 490        if (src)
 491                *src = fq;
 492
 493        return cleaned;
 494}
 495
 496/* Configure the egress frame annotation for timestamp update */
 497static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
 498{
 499        struct dpaa2_faead *faead;
 500        u32 ctrl, frc;
 501
 502        /* Mark the egress frame annotation area as valid */
 503        frc = dpaa2_fd_get_frc(fd);
 504        dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
 505
 506        /* Set hardware annotation size */
 507        ctrl = dpaa2_fd_get_ctrl(fd);
 508        dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
 509
  510        /* Enable UPD (update prepended data) bit in FAEAD field of
 511         * hardware frame annotation area
 512         */
 513        ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
 514        faead = dpaa2_get_faead(buf_start, true);
 515        faead->ctrl = cpu_to_le32(ctrl);
 516}
 517
 518/* Create a frame descriptor based on a fragmented skb */
 519static int build_sg_fd(struct dpaa2_eth_priv *priv,
 520                       struct sk_buff *skb,
 521                       struct dpaa2_fd *fd)
 522{
 523        struct device *dev = priv->net_dev->dev.parent;
 524        void *sgt_buf = NULL;
 525        dma_addr_t addr;
 526        int nr_frags = skb_shinfo(skb)->nr_frags;
 527        struct dpaa2_sg_entry *sgt;
 528        int i, err;
 529        int sgt_buf_size;
 530        struct scatterlist *scl, *crt_scl;
 531        int num_sg;
 532        int num_dma_bufs;
 533        struct dpaa2_eth_swa *swa;
 534
 535        /* Create and map scatterlist.
 536         * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
 537         * to go beyond nr_frags+1.
 538         * Note: We don't support chained scatterlists
 539         */
 540        if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
 541                return -EINVAL;
 542
 543        scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
 544        if (unlikely(!scl))
 545                return -ENOMEM;
 546
 547        sg_init_table(scl, nr_frags + 1);
 548        num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
 549        num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 550        if (unlikely(!num_dma_bufs)) {
 551                err = -ENOMEM;
 552                goto dma_map_sg_failed;
 553        }
 554
 555        /* Prepare the HW SGT structure */
 556        sgt_buf_size = priv->tx_data_offset +
  557                       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
 558        sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
 559        if (unlikely(!sgt_buf)) {
 560                err = -ENOMEM;
 561                goto sgt_buf_alloc_failed;
 562        }
 563        sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
 564        memset(sgt_buf, 0, sgt_buf_size);
 565
 566        sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 567
 568        /* Fill in the HW SGT structure.
 569         *
 570         * sgt_buf is zeroed out, so the following fields are implicit
 571         * in all sgt entries:
 572         *   - offset is 0
 573         *   - format is 'dpaa2_sg_single'
 574         */
 575        for_each_sg(scl, crt_scl, num_dma_bufs, i) {
 576                dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
 577                dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
 578        }
 579        dpaa2_sg_set_final(&sgt[i - 1], true);
 580
 581        /* Store the skb backpointer in the SGT buffer.
 582         * Fit the scatterlist and the number of buffers alongside the
 583         * skb backpointer in the software annotation area. We'll need
 584         * all of them on Tx Conf.
 585         */
 586        swa = (struct dpaa2_eth_swa *)sgt_buf;
 587        swa->type = DPAA2_ETH_SWA_SG;
 588        swa->sg.skb = skb;
 589        swa->sg.scl = scl;
 590        swa->sg.num_sg = num_sg;
 591        swa->sg.sgt_size = sgt_buf_size;
 592
 593        /* Separately map the SGT buffer */
 594        addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
 595        if (unlikely(dma_mapping_error(dev, addr))) {
 596                err = -ENOMEM;
 597                goto dma_map_single_failed;
 598        }
 599        dpaa2_fd_set_offset(fd, priv->tx_data_offset);
 600        dpaa2_fd_set_format(fd, dpaa2_fd_sg);
 601        dpaa2_fd_set_addr(fd, addr);
 602        dpaa2_fd_set_len(fd, skb->len);
 603        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
 604
 605        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 606                enable_tx_tstamp(fd, sgt_buf);
 607
 608        return 0;
 609
 610dma_map_single_failed:
 611        skb_free_frag(sgt_buf);
 612sgt_buf_alloc_failed:
 613        dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 614dma_map_sg_failed:
 615        kfree(scl);
 616        return err;
 617}
 618
 619/* Create a frame descriptor based on a linear skb */
 620static int build_single_fd(struct dpaa2_eth_priv *priv,
 621                           struct sk_buff *skb,
 622                           struct dpaa2_fd *fd)
 623{
 624        struct device *dev = priv->net_dev->dev.parent;
 625        u8 *buffer_start, *aligned_start;
 626        struct dpaa2_eth_swa *swa;
 627        dma_addr_t addr;
 628
 629        buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
 630
 631        /* If there's enough room to align the FD address, do it.
 632         * It will help hardware optimize accesses.
 633         */
 634        aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
 635                                  DPAA2_ETH_TX_BUF_ALIGN);
 636        if (aligned_start >= skb->head)
 637                buffer_start = aligned_start;
 638
 639        /* Store a backpointer to the skb at the beginning of the buffer
 640         * (in the private data area) such that we can release it
 641         * on Tx confirm
 642         */
 643        swa = (struct dpaa2_eth_swa *)buffer_start;
 644        swa->type = DPAA2_ETH_SWA_SINGLE;
 645        swa->single.skb = skb;
 646
 647        addr = dma_map_single(dev, buffer_start,
 648                              skb_tail_pointer(skb) - buffer_start,
 649                              DMA_BIDIRECTIONAL);
 650        if (unlikely(dma_mapping_error(dev, addr)))
 651                return -ENOMEM;
 652
 653        dpaa2_fd_set_addr(fd, addr);
 654        dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
 655        dpaa2_fd_set_len(fd, skb->len);
 656        dpaa2_fd_set_format(fd, dpaa2_fd_single);
 657        dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
 658
 659        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 660                enable_tx_tstamp(fd, buffer_start);
 661
 662        return 0;
 663}
 664
 665/* FD freeing routine on the Tx path
 666 *
 667 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 668 * back-pointed to is also freed.
 669 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 670 * dpaa2_eth_tx().
 671 */
 672static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 673                       struct dpaa2_eth_fq *fq,
 674                       const struct dpaa2_fd *fd, bool in_napi)
 675{
 676        struct device *dev = priv->net_dev->dev.parent;
 677        dma_addr_t fd_addr;
 678        struct sk_buff *skb = NULL;
 679        unsigned char *buffer_start;
 680        struct dpaa2_eth_swa *swa;
 681        u8 fd_format = dpaa2_fd_get_format(fd);
 682        u32 fd_len = dpaa2_fd_get_len(fd);
 683
 684        fd_addr = dpaa2_fd_get_addr(fd);
 685        buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
 686        swa = (struct dpaa2_eth_swa *)buffer_start;
 687
 688        if (fd_format == dpaa2_fd_single) {
 689                if (swa->type == DPAA2_ETH_SWA_SINGLE) {
 690                        skb = swa->single.skb;
 691                        /* Accessing the skb buffer is safe before dma unmap,
 692                         * because we didn't map the actual skb shell.
 693                         */
 694                        dma_unmap_single(dev, fd_addr,
 695                                         skb_tail_pointer(skb) - buffer_start,
 696                                         DMA_BIDIRECTIONAL);
 697                } else {
 698                        WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
 699                        dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
 700                                         DMA_BIDIRECTIONAL);
 701                }
 702        } else if (fd_format == dpaa2_fd_sg) {
 703                skb = swa->sg.skb;
 704
 705                /* Unmap the scatterlist */
 706                dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
 707                             DMA_BIDIRECTIONAL);
 708                kfree(swa->sg.scl);
 709
 710                /* Unmap the SGT buffer */
 711                dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
 712                                 DMA_BIDIRECTIONAL);
 713        } else {
 714                netdev_dbg(priv->net_dev, "Invalid FD format\n");
 715                return;
 716        }
 717
 718        if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
 719                fq->dq_frames++;
 720                fq->dq_bytes += fd_len;
 721        }
 722
 723        if (swa->type == DPAA2_ETH_SWA_XDP) {
 724                xdp_return_frame(swa->xdp.xdpf);
 725                return;
 726        }
 727
 728        /* Get the timestamp value */
 729        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 730                struct skb_shared_hwtstamps shhwtstamps;
 731                __le64 *ts = dpaa2_get_ts(buffer_start, true);
 732                u64 ns;
 733
 734                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 735
 736                ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
 737                shhwtstamps.hwtstamp = ns_to_ktime(ns);
 738                skb_tstamp_tx(skb, &shhwtstamps);
 739        }
 740
 741        /* Free SGT buffer allocated on tx */
 742        if (fd_format != dpaa2_fd_single)
 743                skb_free_frag(buffer_start);
 744
 745        /* Move on with skb release */
 746        napi_consume_skb(skb, in_napi);
 747}
 748
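     /* Main Tx routine: build a single or scatter-gather frame descriptor
      * from the skb and enqueue it on the Tx frame queue selected based on
      * the queue mapping chosen by the stack
      */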
 749static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 750{
 751        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 752        struct dpaa2_fd fd;
 753        struct rtnl_link_stats64 *percpu_stats;
 754        struct dpaa2_eth_drv_stats *percpu_extras;
 755        struct dpaa2_eth_fq *fq;
 756        struct netdev_queue *nq;
 757        u16 queue_mapping;
 758        unsigned int needed_headroom;
 759        u32 fd_len;
 760        u8 prio = 0;
 761        int err, i;
 762
 763        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 764        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 765
 766        needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
 767        if (skb_headroom(skb) < needed_headroom) {
 768                struct sk_buff *ns;
 769
 770                ns = skb_realloc_headroom(skb, needed_headroom);
 771                if (unlikely(!ns)) {
 772                        percpu_stats->tx_dropped++;
 773                        goto err_alloc_headroom;
 774                }
 775                percpu_extras->tx_reallocs++;
 776
 777                if (skb->sk)
 778                        skb_set_owner_w(ns, skb->sk);
 779
 780                dev_kfree_skb(skb);
 781                skb = ns;
 782        }
 783
 784        /* We'll be holding a back-reference to the skb until Tx Confirmation;
 785         * we don't want that overwritten by a concurrent Tx with a cloned skb.
 786         */
 787        skb = skb_unshare(skb, GFP_ATOMIC);
 788        if (unlikely(!skb)) {
 789                /* skb_unshare() has already freed the skb */
 790                percpu_stats->tx_dropped++;
 791                return NETDEV_TX_OK;
 792        }
 793
 794        /* Setup the FD fields */
 795        memset(&fd, 0, sizeof(fd));
 796
 797        if (skb_is_nonlinear(skb)) {
 798                err = build_sg_fd(priv, skb, &fd);
 799                percpu_extras->tx_sg_frames++;
 800                percpu_extras->tx_sg_bytes += skb->len;
 801        } else {
 802                err = build_single_fd(priv, skb, &fd);
 803        }
 804
 805        if (unlikely(err)) {
 806                percpu_stats->tx_dropped++;
 807                goto err_build_fd;
 808        }
 809
 810        /* Tracing point */
 811        trace_dpaa2_tx_fd(net_dev, &fd);
 812
 813        /* TxConf FQ selection relies on queue id from the stack.
 814         * In case of a forwarded frame from another DPNI interface, we choose
 815         * a queue affined to the same core that processed the Rx frame
 816         */
 817        queue_mapping = skb_get_queue_mapping(skb);
 818
 819        if (net_dev->num_tc) {
 820                prio = netdev_txq_to_tc(net_dev, queue_mapping);
 821                /* Hardware interprets priority level 0 as being the highest,
 822                 * so we need to do a reverse mapping to the netdev tc index
 823                 */
 824                prio = net_dev->num_tc - prio - 1;
 825                /* We have only one FQ array entry for all Tx hardware queues
 826                 * with the same flow id (but different priority levels)
 827                 */
 828                queue_mapping %= dpaa2_eth_queue_count(priv);
 829        }
 830        fq = &priv->fq[queue_mapping];
 831
 832        fd_len = dpaa2_fd_get_len(&fd);
 833        nq = netdev_get_tx_queue(net_dev, queue_mapping);
 834        netdev_tx_sent_queue(nq, fd_len);
 835
  836        /* Everything that happens after this enqueue might race with
 837         * the Tx confirmation callback for this frame
 838         */
 839        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
 840                err = priv->enqueue(priv, fq, &fd, prio);
 841                if (err != -EBUSY)
 842                        break;
 843        }
 844        percpu_extras->tx_portal_busy += i;
 845        if (unlikely(err < 0)) {
 846                percpu_stats->tx_errors++;
 847                /* Clean up everything, including freeing the skb */
 848                free_tx_fd(priv, fq, &fd, false);
 849                netdev_tx_completed_queue(nq, 1, fd_len);
 850        } else {
 851                percpu_stats->tx_packets++;
 852                percpu_stats->tx_bytes += fd_len;
 853        }
 854
 855        return NETDEV_TX_OK;
 856
 857err_build_fd:
 858err_alloc_headroom:
 859        dev_kfree_skb(skb);
 860
 861        return NETDEV_TX_OK;
 862}
 863
 864/* Tx confirmation frame processing routine */
 865static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 866                              struct dpaa2_eth_channel *ch __always_unused,
 867                              const struct dpaa2_fd *fd,
 868                              struct dpaa2_eth_fq *fq)
 869{
 870        struct rtnl_link_stats64 *percpu_stats;
 871        struct dpaa2_eth_drv_stats *percpu_extras;
 872        u32 fd_len = dpaa2_fd_get_len(fd);
 873        u32 fd_errors;
 874
 875        /* Tracing point */
 876        trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
 877
 878        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 879        percpu_extras->tx_conf_frames++;
 880        percpu_extras->tx_conf_bytes += fd_len;
 881
 882        /* Check frame errors in the FD field */
 883        fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
 884        free_tx_fd(priv, fq, fd, true);
 885
 886        if (likely(!fd_errors))
 887                return;
 888
 889        if (net_ratelimit())
 890                netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
 891                           fd_errors);
 892
 893        percpu_stats = this_cpu_ptr(priv->percpu_stats);
 894        /* Tx-conf logically pertains to the egress path. */
 895        percpu_stats->tx_errors++;
 896}
 897
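     /* Enable/disable Rx L3 and L4 checksum validation in hardware */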
 898static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
 899{
 900        int err;
 901
 902        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
 903                               DPNI_OFF_RX_L3_CSUM, enable);
 904        if (err) {
 905                netdev_err(priv->net_dev,
 906                           "dpni_set_offload(RX_L3_CSUM) failed\n");
 907                return err;
 908        }
 909
 910        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
 911                               DPNI_OFF_RX_L4_CSUM, enable);
 912        if (err) {
 913                netdev_err(priv->net_dev,
 914                           "dpni_set_offload(RX_L4_CSUM) failed\n");
 915                return err;
 916        }
 917
 918        return 0;
 919}
 920
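     /* Enable/disable Tx L3 and L4 checksum generation in hardware */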
 921static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
 922{
 923        int err;
 924
 925        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
 926                               DPNI_OFF_TX_L3_CSUM, enable);
 927        if (err) {
 928                netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
 929                return err;
 930        }
 931
 932        err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
 933                               DPNI_OFF_TX_L4_CSUM, enable);
 934        if (err) {
 935                netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
 936                return err;
 937        }
 938
 939        return 0;
 940}
 941
 942/* Perform a single release command to add buffers
 943 * to the specified buffer pool
 944 */
 945static int add_bufs(struct dpaa2_eth_priv *priv,
 946                    struct dpaa2_eth_channel *ch, u16 bpid)
 947{
 948        struct device *dev = priv->net_dev->dev.parent;
 949        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
 950        struct page *page;
 951        dma_addr_t addr;
 952        int i, err;
 953
 954        for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
  955                /* Allocate one page for each Rx buffer. WRIOP sees
  956                 * the entire page except for a tailroom reserved for
  957                 * the skb shared info
  958                 */
 962                page = dev_alloc_pages(0);
 963                if (!page)
 964                        goto err_alloc;
 965
 966                addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
 967                                    DMA_BIDIRECTIONAL);
 968                if (unlikely(dma_mapping_error(dev, addr)))
 969                        goto err_map;
 970
 971                buf_array[i] = addr;
 972
 973                /* tracing point */
 974                trace_dpaa2_eth_buf_seed(priv->net_dev,
 975                                         page, DPAA2_ETH_RX_BUF_RAW_SIZE,
 976                                         addr, DPAA2_ETH_RX_BUF_SIZE,
 977                                         bpid);
 978        }
 979
 980release_bufs:
 981        /* In case the portal is busy, retry until successful */
 982        while ((err = dpaa2_io_service_release(ch->dpio, bpid,
 983                                               buf_array, i)) == -EBUSY)
 984                cpu_relax();
 985
 986        /* If release command failed, clean up and bail out;
 987         * not much else we can do about it
 988         */
 989        if (err) {
 990                free_bufs(priv, buf_array, i);
 991                return 0;
 992        }
 993
 994        return i;
 995
 996err_map:
 997        __free_pages(page, 0);
 998err_alloc:
 999        /* If we managed to allocate at least some buffers,
1000         * release them to hardware
1001         */
1002        if (i)
1003                goto release_bufs;
1004
1005        return 0;
1006}
1007
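     /* Seed the buffer pool with DPAA2_ETH_NUM_BUFS buffers for each channel,
      * adding them in batches of DPAA2_ETH_BUFS_PER_CMD
      */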
1008static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1009{
1010        int i, j;
1011        int new_count;
1012
1013        for (j = 0; j < priv->num_channels; j++) {
1014                for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1015                     i += DPAA2_ETH_BUFS_PER_CMD) {
1016                        new_count = add_bufs(priv, priv->channel[j], bpid);
1017                        priv->channel[j]->buf_count += new_count;
1018
 1019                        if (new_count < DPAA2_ETH_BUFS_PER_CMD)
 1020                                return -ENOMEM;
1022                }
1023        }
1024
1025        return 0;
1026}
1027
 1028/*
 1029 * Drain the specified number of buffers from the DPNI's private buffer pool.
 1030 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 1031 */
1032static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1033{
1034        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1035        int ret;
1036
1037        do {
1038                ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1039                                               buf_array, count);
1040                if (ret < 0) {
1041                        netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1042                        return;
1043                }
1044                free_bufs(priv, buf_array, ret);
1045        } while (ret);
1046}
1047
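     /* Drain the buffer pool completely: first in full-sized batches, then
      * one buffer at a time for any remainder, and reset the per-channel
      * software counters
      */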
1048static void drain_pool(struct dpaa2_eth_priv *priv)
1049{
1050        int i;
1051
1052        drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1053        drain_bufs(priv, 1);
1054
1055        for (i = 0; i < priv->num_channels; i++)
1056                priv->channel[i]->buf_count = 0;
1057}
1058
 1059/* Function is called from softirq context only, so we don't need to guard
 1060 * the access to the channel's buffer count
 1061 */
1062static int refill_pool(struct dpaa2_eth_priv *priv,
1063                       struct dpaa2_eth_channel *ch,
1064                       u16 bpid)
1065{
1066        int new_count;
1067
1068        if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1069                return 0;
1070
1071        do {
1072                new_count = add_bufs(priv, ch, bpid);
1073                if (unlikely(!new_count)) {
1074                        /* Out of memory; abort for now, we'll try later on */
1075                        break;
1076                }
1077                ch->buf_count += new_count;
1078        } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1079
1080        if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1081                return -ENOMEM;
1082
1083        return 0;
1084}
1085
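     /* Issue a volatile dequeue command for the channel, placing the dequeued
      * frames into the channel's store; retry while the QBMan portal is busy
      */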
1086static int pull_channel(struct dpaa2_eth_channel *ch)
1087{
1088        int err;
1089        int dequeues = -1;
1090
1091        /* Retry while portal is busy */
1092        do {
1093                err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1094                                                    ch->store);
1095                dequeues++;
1096                cpu_relax();
1097        } while (err == -EBUSY);
1098
1099        ch->stats.dequeue_portal_busy += dequeues;
1100        if (unlikely(err))
1101                ch->stats.pull_err++;
1102
1103        return err;
1104}
1105
1106/* NAPI poll routine
1107 *
1108 * Frames are dequeued from the QMan channel associated with this NAPI context.
1109 * Rx, Tx confirmation and (if configured) Rx error frames all count
1110 * towards the NAPI budget.
1111 */
1112static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1113{
1114        struct dpaa2_eth_channel *ch;
1115        struct dpaa2_eth_priv *priv;
1116        int rx_cleaned = 0, txconf_cleaned = 0;
1117        struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1118        struct netdev_queue *nq;
1119        int store_cleaned, work_done;
1120        struct list_head rx_list;
1121        int err;
1122
1123        ch = container_of(napi, struct dpaa2_eth_channel, napi);
1124        ch->xdp.res = 0;
1125        priv = ch->priv;
1126
1127        INIT_LIST_HEAD(&rx_list);
1128        ch->rx_list = &rx_list;
1129
1130        do {
1131                err = pull_channel(ch);
1132                if (unlikely(err))
1133                        break;
1134
1135                /* Refill pool if appropriate */
1136                refill_pool(priv, ch, priv->bpid);
1137
1138                store_cleaned = consume_frames(ch, &fq);
1139                if (!store_cleaned)
1140                        break;
1141                if (fq->type == DPAA2_RX_FQ) {
1142                        rx_cleaned += store_cleaned;
1143                } else {
1144                        txconf_cleaned += store_cleaned;
1145                        /* We have a single Tx conf FQ on this channel */
1146                        txc_fq = fq;
1147                }
1148
1149                /* If we either consumed the whole NAPI budget with Rx frames
1150                 * or we reached the Tx confirmations threshold, we're done.
1151                 */
1152                if (rx_cleaned >= budget ||
1153                    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1154                        work_done = budget;
1155                        goto out;
1156                }
1157        } while (store_cleaned);
1158
1159        /* We didn't consume the entire budget, so finish napi and
1160         * re-enable data availability notifications
1161         */
1162        napi_complete_done(napi, rx_cleaned);
1163        do {
1164                err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1165                cpu_relax();
1166        } while (err == -EBUSY);
1167        WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1168                  ch->nctx.desired_cpu);
1169
1170        work_done = max(rx_cleaned, 1);
1171
1172out:
1173        netif_receive_skb_list(ch->rx_list);
1174
1175        if (txc_fq && txc_fq->dq_frames) {
1176                nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1177                netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1178                                          txc_fq->dq_bytes);
1179                txc_fq->dq_frames = 0;
1180                txc_fq->dq_bytes = 0;
1181        }
1182
1183        if (ch->xdp.res & XDP_REDIRECT)
1184                xdp_do_flush_map();
1185
1186        return work_done;
1187}
1188
1189static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1190{
1191        struct dpaa2_eth_channel *ch;
1192        int i;
1193
1194        for (i = 0; i < priv->num_channels; i++) {
1195                ch = priv->channel[i];
1196                napi_enable(&ch->napi);
1197        }
1198}
1199
1200static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1201{
1202        struct dpaa2_eth_channel *ch;
1203        int i;
1204
1205        for (i = 0; i < priv->num_channels; i++) {
1206                ch = priv->channel[i];
1207                napi_disable(&ch->napi);
1208        }
1209}
1210
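     /* Query the current link state from firmware and, if it changed, update
      * the carrier status and the state of the Tx queues accordingly
      */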
1211static int link_state_update(struct dpaa2_eth_priv *priv)
1212{
1213        struct dpni_link_state state = {0};
1214        int err;
1215
1216        err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1217        if (unlikely(err)) {
1218                netdev_err(priv->net_dev,
1219                           "dpni_get_link_state() failed\n");
1220                return err;
1221        }
1222
 1223        /* Check link state; speed / duplex changes are not treated yet */
1224        if (priv->link_state.up == state.up)
1225                return 0;
1226
1227        priv->link_state = state;
1228        if (state.up) {
1229                netif_carrier_on(priv->net_dev);
1230                netif_tx_start_all_queues(priv->net_dev);
1231        } else {
1232                netif_tx_stop_all_queues(priv->net_dev);
1233                netif_carrier_off(priv->net_dev);
1234        }
1235
1236        netdev_info(priv->net_dev, "Link Event: state %s\n",
1237                    state.up ? "up" : "down");
1238
1239        return 0;
1240}
1241
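     /* Bring the interface up: seed the buffer pool, enable NAPI and the
      * DPNI, then sync the software link state with the hardware one
      */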
1242static int dpaa2_eth_open(struct net_device *net_dev)
1243{
1244        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1245        int err;
1246
1247        err = seed_pool(priv, priv->bpid);
1248        if (err) {
1249                /* Not much to do; the buffer pool, though not filled up,
1250                 * may still contain some buffers which would enable us
1251                 * to limp on.
1252                 */
1253                netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1254                           priv->dpbp_dev->obj_desc.id, priv->bpid);
1255        }
1256
1257        /* We'll only start the txqs when the link is actually ready; make sure
1258         * we don't race against the link up notification, which may come
1259         * immediately after dpni_enable();
1260         */
1261        netif_tx_stop_all_queues(net_dev);
1262        enable_ch_napi(priv);
1263        /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1264         * return true and cause 'ip link show' to report the LOWER_UP flag,
1265         * even though the link notification wasn't even received.
1266         */
1267        netif_carrier_off(net_dev);
1268
1269        err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1270        if (err < 0) {
1271                netdev_err(net_dev, "dpni_enable() failed\n");
1272                goto enable_err;
1273        }
1274
1275        /* If the DPMAC object has already processed the link up interrupt,
1276         * we have to learn the link state ourselves.
1277         */
1278        err = link_state_update(priv);
1279        if (err < 0) {
1280                netdev_err(net_dev, "Can't update link state\n");
1281                goto link_state_err;
1282        }
1283
1284        return 0;
1285
1286link_state_err:
1287enable_err:
1288        disable_ch_napi(priv);
1289        drain_pool(priv);
1290        return err;
1291}
1292
1293/* Total number of in-flight frames on ingress queues */
1294static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1295{
1296        struct dpaa2_eth_fq *fq;
1297        u32 fcnt = 0, bcnt = 0, total = 0;
1298        int i, err;
1299
1300        for (i = 0; i < priv->num_fqs; i++) {
1301                fq = &priv->fq[i];
1302                err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1303                if (err) {
1304                        netdev_warn(priv->net_dev, "query_fq_count failed");
1305                        break;
1306                }
1307                total += fcnt;
1308        }
1309
1310        return total;
1311}
1312
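     /* Poll the ingress frame queues until they are empty, giving up after a
      * bounded number of retries
      */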
1313static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
1314{
1315        int retries = 10;
1316        u32 pending;
1317
1318        do {
1319                pending = ingress_fq_count(priv);
1320                if (pending)
1321                        msleep(100);
1322        } while (pending && --retries);
1323}
1324
1325static int dpaa2_eth_stop(struct net_device *net_dev)
1326{
1327        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1328        int dpni_enabled = 0;
1329        int retries = 10;
1330
1331        netif_tx_stop_all_queues(net_dev);
1332        netif_carrier_off(net_dev);
1333
1334        /* On dpni_disable(), the MC firmware will:
1335         * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1336         * - cut off WRIOP dequeues from egress FQs and wait until transmission
1337         * of all in flight Tx frames is finished (and corresponding Tx conf
1338         * frames are enqueued back to software)
1339         *
1340         * Before calling dpni_disable(), we wait for all Tx frames to arrive
1341         * on WRIOP. After it finishes, wait until all remaining frames on Rx
1342         * and Tx conf queues are consumed on NAPI poll.
1343         */
1344        msleep(500);
1345
1346        do {
1347                dpni_disable(priv->mc_io, 0, priv->mc_token);
1348                dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1349                if (dpni_enabled)
1350                        /* Allow the hardware some slack */
1351                        msleep(100);
1352        } while (dpni_enabled && --retries);
1353        if (!retries) {
1354                netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1355                /* Must go on and disable NAPI nonetheless, so we don't crash at
1356                 * the next "ifconfig up"
1357                 */
1358        }
1359
1360        wait_for_fq_empty(priv);
1361        disable_ch_napi(priv);
1362
1363        /* Empty the buffer pool */
1364        drain_pool(priv);
1365
1366        return 0;
1367}
1368
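     /* Update both the net_device and the DPNI primary MAC address */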
1369static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1370{
1371        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1372        struct device *dev = net_dev->dev.parent;
1373        int err;
1374
1375        err = eth_mac_addr(net_dev, addr);
1376        if (err < 0) {
1377                dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1378                return err;
1379        }
1380
1381        err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1382                                        net_dev->dev_addr);
1383        if (err) {
1384                dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1385                return err;
1386        }
1387
1388        return 0;
1389}
1390
 1391/* Fill in counters maintained by the GPP driver. These may be different from
1392 * the hardware counters obtained by ethtool.
1393 */
1394static void dpaa2_eth_get_stats(struct net_device *net_dev,
1395                                struct rtnl_link_stats64 *stats)
1396{
1397        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1398        struct rtnl_link_stats64 *percpu_stats;
1399        u64 *cpustats;
1400        u64 *netstats = (u64 *)stats;
1401        int i, j;
1402        int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1403
1404        for_each_possible_cpu(i) {
1405                percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1406                cpustats = (u64 *)percpu_stats;
1407                for (j = 0; j < num; j++)
1408                        netstats[j] += cpustats[j];
1409        }
1410}
1411
1412/* Copy mac unicast addresses from @net_dev to @priv.
1413 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1414 */
1415static void add_uc_hw_addr(const struct net_device *net_dev,
1416                           struct dpaa2_eth_priv *priv)
1417{
1418        struct netdev_hw_addr *ha;
1419        int err;
1420
1421        netdev_for_each_uc_addr(ha, net_dev) {
1422                err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1423                                        ha->addr);
1424                if (err)
1425                        netdev_warn(priv->net_dev,
1426                                    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1427                                    ha->addr, err);
1428        }
1429}
1430
1431/* Copy mac multicast addresses from @net_dev to @priv
1432 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1433 */
1434static void add_mc_hw_addr(const struct net_device *net_dev,
1435                           struct dpaa2_eth_priv *priv)
1436{
1437        struct netdev_hw_addr *ha;
1438        int err;
1439
1440        netdev_for_each_mc_addr(ha, net_dev) {
1441                err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1442                                        ha->addr);
1443                if (err)
1444                        netdev_warn(priv->net_dev,
1445                                    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1446                                    ha->addr, err);
1447        }
1448}
1449
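     /* Rebuild the MAC filtering tables and adjust the unicast/multicast
      * promiscuous settings to match the netdev flags and address lists
      */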
1450static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1451{
1452        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1453        int uc_count = netdev_uc_count(net_dev);
1454        int mc_count = netdev_mc_count(net_dev);
1455        u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1456        u32 options = priv->dpni_attrs.options;
1457        u16 mc_token = priv->mc_token;
1458        struct fsl_mc_io *mc_io = priv->mc_io;
1459        int err;
1460
1461        /* Basic sanity checks; these probably indicate a misconfiguration */
1462        if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1463                netdev_info(net_dev,
1464                            "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1465                            max_mac);
1466
1467        /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1468        if (uc_count > max_mac) {
1469                netdev_info(net_dev,
1470                            "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1471                            uc_count, max_mac);
1472                goto force_promisc;
1473        }
1474        if (mc_count + uc_count > max_mac) {
1475                netdev_info(net_dev,
1476                            "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1477                            uc_count + mc_count, max_mac);
1478                goto force_mc_promisc;
1479        }
1480
1481        /* Adjust promisc settings due to flag combinations */
1482        if (net_dev->flags & IFF_PROMISC)
1483                goto force_promisc;
1484        if (net_dev->flags & IFF_ALLMULTI) {
1485                /* First, rebuild unicast filtering table. This should be done
1486                 * in promisc mode, in order to avoid frame loss while we
1487                 * progressively add entries to the table.
1488                 * We don't know whether we had been in promisc already, and
1489                 * making an MC call to find out is expensive; so set uc promisc
1490                 * nonetheless.
1491                 */
1492                err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1493                if (err)
1494                        netdev_warn(net_dev, "Can't set uc promisc\n");
1495
1496                /* Actual uc table reconstruction. */
1497                err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1498                if (err)
1499                        netdev_warn(net_dev, "Can't clear uc filters\n");
1500                add_uc_hw_addr(net_dev, priv);
1501
1502                /* Finally, clear uc promisc and set mc promisc as requested. */
1503                err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1504                if (err)
1505                        netdev_warn(net_dev, "Can't clear uc promisc\n");
1506                goto force_mc_promisc;
1507        }
1508
1509        /* Neither unicast nor multicast promisc will be on... eventually.
1510         * For now, rebuild mac filtering tables while forcing both of them on.
1511         */
1512        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1513        if (err)
1514                netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1515        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1516        if (err)
1517                netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1518
1519        /* Actual mac filtering tables reconstruction */
1520        err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1521        if (err)
1522                netdev_warn(net_dev, "Can't clear mac filters\n");
1523        add_mc_hw_addr(net_dev, priv);
1524        add_uc_hw_addr(net_dev, priv);
1525
1526        /* Now we can clear both ucast and mcast promisc, without the risk of
1527         * dropping legitimate frames anymore.
1528         */
1529        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1530        if (err)
1531                netdev_warn(net_dev, "Can't clear ucast promisc\n");
1532        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1533        if (err)
1534                netdev_warn(net_dev, "Can't clear mcast promisc\n");
1535
1536        return;
1537
1538force_promisc:
1539        err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1540        if (err)
1541                netdev_warn(net_dev, "Can't set ucast promisc\n");
1542force_mc_promisc:
1543        err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1544        if (err)
1545                netdev_warn(net_dev, "Can't set mcast promisc\n");
1546}
1547
1548static int dpaa2_eth_set_features(struct net_device *net_dev,
1549                                  netdev_features_t features)
1550{
1551        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1552        netdev_features_t changed = features ^ net_dev->features;
1553        bool enable;
1554        int err;
1555
1556        if (changed & NETIF_F_RXCSUM) {
1557                enable = !!(features & NETIF_F_RXCSUM);
1558                err = set_rx_csum(priv, enable);
1559                if (err)
1560                        return err;
1561        }
1562
1563        if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1564                enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1565                err = set_tx_csum(priv, enable);
1566                if (err)
1567                        return err;
1568        }
1569
1570        return 0;
1571}
1572
1573static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1574{
1575        struct dpaa2_eth_priv *priv = netdev_priv(dev);
1576        struct hwtstamp_config config;
1577
1578        if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1579                return -EFAULT;
1580
1581        switch (config.tx_type) {
1582        case HWTSTAMP_TX_OFF:
1583                priv->tx_tstamp = false;
1584                break;
1585        case HWTSTAMP_TX_ON:
1586                priv->tx_tstamp = true;
1587                break;
1588        default:
1589                return -ERANGE;
1590        }
1591
1592        if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1593                priv->rx_tstamp = false;
1594        } else {
1595                priv->rx_tstamp = true;
1596                /* TS is set for all frame types, not only those requested */
1597                config.rx_filter = HWTSTAMP_FILTER_ALL;
1598        }
1599
1600        return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1601                        -EFAULT : 0;
1602}
1603
1604static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1605{
1606        if (cmd == SIOCSHWTSTAMP)
1607                return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1608
1609        return -EINVAL;
1610}
1611
1612static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1613{
1614        int mfl, linear_mfl;
1615
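            /* Largest L2 frame that still fits in a single (linear) Rx buffer,
             * after subtracting the hardware annotation area, the Rx headroom
             * and the extra headroom reserved for XDP
             */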
1616        mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1617        linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1618                     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1619
1620        if (mfl > linear_mfl) {
1621                netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1622                            linear_mfl - VLAN_ETH_HLEN);
1623                return false;
1624        }
1625
1626        return true;
1627}
1628
1629static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1630{
1631        int mfl, err;
1632
1633        /* We enforce a maximum Rx frame length based on MTU only if we have
1634         * an XDP program attached (in order to avoid Rx S/G frames).
1635         * Otherwise, we accept all incoming frames as long as they are not
1636         * larger than the maximum frame size supported in hardware.
1637         */
1638        if (has_xdp)
1639                mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1640        else
1641                mfl = DPAA2_ETH_MFL;
1642
1643        err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1644        if (err) {
1645                netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1646                return err;
1647        }
1648
1649        return 0;
1650}
1651
1652static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1653{
1654        struct dpaa2_eth_priv *priv = netdev_priv(dev);
1655        int err;
1656
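            /* Without an XDP program attached, the hardware max frame length
             * stays at its maximum supported value, so only the netdev MTU
             * needs updating
             */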
1657        if (!priv->xdp_prog)
1658                goto out;
1659
1660        if (!xdp_mtu_valid(priv, new_mtu))
1661                return -EINVAL;
1662
1663        err = set_rx_mfl(priv, new_mtu, true);
1664        if (err)
1665                return err;
1666
1667out:
1668        dev->mtu = new_mtu;
1669        return 0;
1670}
1671
1672static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1673{
1674        struct dpni_buffer_layout buf_layout = {0};
1675        int err;
1676
1677        err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1678                                     DPNI_QUEUE_RX, &buf_layout);
1679        if (err) {
1680                netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
1681                return err;
1682        }
1683
1684        /* Reserve extra headroom for XDP header size changes */
1685        buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
1686                                    (has_xdp ? XDP_PACKET_HEADROOM : 0);
1687        buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1688        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1689                                     DPNI_QUEUE_RX, &buf_layout);
1690        if (err) {
1691                netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
1692                return err;
1693        }
1694
1695        return 0;
1696}
1697
1698static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
1699{
1700        struct dpaa2_eth_priv *priv = netdev_priv(dev);
1701        struct dpaa2_eth_channel *ch;
1702        struct bpf_prog *old;
1703        bool up, need_update;
1704        int i, err;
1705
1706        if (prog && !xdp_mtu_valid(priv, dev->mtu))
1707                return -EINVAL;
1708
1709        if (prog) {
1710                prog = bpf_prog_add(prog, priv->num_channels);
1711                if (IS_ERR(prog))
1712                        return PTR_ERR(prog);
1713        }
1714
1715        up = netif_running(dev);
1716        need_update = (!!priv->xdp_prog != !!prog);
1717
1718        if (up)
1719                dpaa2_eth_stop(dev);
1720
1721        /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
1722         * Also, when switching between xdp/non-xdp modes we need to reconfigure
1723         * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
1724         * so we are sure no old format buffers will be used from now on.
1725         */
1726        if (need_update) {
1727                err = set_rx_mfl(priv, dev->mtu, !!prog);
1728                if (err)
1729                        goto out_err;
1730                err = update_rx_buffer_headroom(priv, !!prog);
1731                if (err)
1732                        goto out_err;
1733        }
1734
1735        old = xchg(&priv->xdp_prog, prog);
1736        if (old)
1737                bpf_prog_put(old);
1738
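            /* Install the program on each channel; the per-channel references
             * were taken via bpf_prog_add() above, and the old program's
             * references are dropped here
             */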
1739        for (i = 0; i < priv->num_channels; i++) {
1740                ch = priv->channel[i];
1741                old = xchg(&ch->xdp.prog, prog);
1742                if (old)
1743                        bpf_prog_put(old);
1744        }
1745
1746        if (up) {
1747                err = dpaa2_eth_open(dev);
1748                if (err)
1749                        return err;
1750        }
1751
1752        return 0;
1753
1754out_err:
1755        if (prog)
1756                bpf_prog_sub(prog, priv->num_channels);
1757        if (up)
1758                dpaa2_eth_open(dev);
1759
1760        return err;
1761}
1762
1763static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1764{
1765        struct dpaa2_eth_priv *priv = netdev_priv(dev);
1766
1767        switch (xdp->command) {
1768        case XDP_SETUP_PROG:
1769                return setup_xdp(dev, xdp->prog);
1770        case XDP_QUERY_PROG:
1771                xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1772                break;
1773        default:
1774                return -EINVAL;
1775        }
1776
1777        return 0;
1778}
1779
1780static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1781                                    struct xdp_frame *xdpf)
1782{
1783        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1784        struct device *dev = net_dev->dev.parent;
1785        struct rtnl_link_stats64 *percpu_stats;
1786        struct dpaa2_eth_drv_stats *percpu_extras;
1787        unsigned int needed_headroom;
1788        struct dpaa2_eth_swa *swa;
1789        struct dpaa2_eth_fq *fq;
1790        struct dpaa2_fd fd;
1791        void *buffer_start, *aligned_start;
1792        dma_addr_t addr;
1793        int err, i;
1794
1795        /* We require a minimum headroom to be able to transmit the frame.
1796         * Otherwise return an error and let the original net_device handle it
1797         */
1798        needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
1799        if (xdpf->headroom < needed_headroom)
1800                return -EINVAL;
1801
1802        percpu_stats = this_cpu_ptr(priv->percpu_stats);
1803        percpu_extras = this_cpu_ptr(priv->percpu_extras);
1804
1805        /* Setup the FD fields */
1806        memset(&fd, 0, sizeof(fd));
1807
1808        /* Align FD address, if possible */
1809        buffer_start = xdpf->data - needed_headroom;
1810        aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1811                                  DPAA2_ETH_TX_BUF_ALIGN);
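            /* Use the aligned address only if it still falls within the
             * headroom available in the xdp_frame
             */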
1812        if (aligned_start >= xdpf->data - xdpf->headroom)
1813                buffer_start = aligned_start;
1814
1815        swa = (struct dpaa2_eth_swa *)buffer_start;
1816        /* Fill in the software annotation used at Tx confirmation time */
1817        swa->type = DPAA2_ETH_SWA_XDP;
1818        swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
1819        swa->xdp.xdpf = xdpf;
1820
1821        addr = dma_map_single(dev, buffer_start,
1822                              swa->xdp.dma_size,
1823                              DMA_BIDIRECTIONAL);
1824        if (unlikely(dma_mapping_error(dev, addr))) {
1825                percpu_stats->tx_dropped++;
1826                return -ENOMEM;
1827        }
1828
1829        dpaa2_fd_set_addr(&fd, addr);
1830        dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
1831        dpaa2_fd_set_len(&fd, xdpf->len);
1832        dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1833        dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1834
1835        fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1836        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1837                err = priv->enqueue(priv, fq, &fd, 0);
1838                if (err != -EBUSY)
1839                        break;
1840        }
1841        percpu_extras->tx_portal_busy += i;
1842        if (unlikely(err < 0)) {
1843                percpu_stats->tx_errors++;
1844                /* let the Rx device handle the cleanup */
1845                return err;
1846        }
1847
1848        percpu_stats->tx_packets++;
1849        percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1850
1851        return 0;
1852}
1853
1854static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
1855                              struct xdp_frame **frames, u32 flags)
1856{
1857        int drops = 0;
1858        int i, err;
1859
1860        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1861                return -EINVAL;
1862
1863        if (!netif_running(net_dev))
1864                return -ENETDOWN;
1865
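            /* Try to transmit each frame in turn; frames that cannot be sent
             * are returned to the XDP memory allocator and counted as drops
             */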
1866        for (i = 0; i < n; i++) {
1867                struct xdp_frame *xdpf = frames[i];
1868
1869                err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
1870                if (err) {
1871                        xdp_return_frame_rx_napi(xdpf);
1872                        drops++;
1873                }
1874        }
1875
1876        return n - drops;
1877}
1878
1879static int update_xps(struct dpaa2_eth_priv *priv)
1880{
1881        struct net_device *net_dev = priv->net_dev;
1882        struct cpumask xps_mask;
1883        struct dpaa2_eth_fq *fq;
1884        int i, num_queues, netdev_queues;
1885        int err = 0;
1886
1887        num_queues = dpaa2_eth_queue_count(priv);
1888        netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
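            /* With mqprio enabled, the netdev exposes one group of Tx queues
             * per traffic class, all backed by the same hardware queues, so
             * the XPS affinity pattern repeats every <num_queues> entries
             */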
1889
1890        /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
1891         * queues, so only process those
1892         */
1893        for (i = 0; i < netdev_queues; i++) {
1894                fq = &priv->fq[i % num_queues];
1895
1896                cpumask_clear(&xps_mask);
1897                cpumask_set_cpu(fq->target_cpu, &xps_mask);
1898
1899                err = netif_set_xps_queue(net_dev, &xps_mask, i);
1900                if (err) {
1901                        netdev_warn_once(net_dev, "Error setting XPS queue\n");
1902                        break;
1903                }
1904        }
1905
1906        return err;
1907}
1908
1909static int dpaa2_eth_setup_tc(struct net_device *net_dev,
1910                              enum tc_setup_type type, void *type_data)
1911{
1912        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1913        struct tc_mqprio_qopt *mqprio = type_data;
1914        u8 num_tc, num_queues;
1915        int i;
1916
1917        if (type != TC_SETUP_QDISC_MQPRIO)
1918                return -EINVAL;
1919
1920        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1921        num_queues = dpaa2_eth_queue_count(priv);
1922        num_tc = mqprio->num_tc;
1923
1924        if (num_tc == net_dev->num_tc)
1925                return 0;
1926
1927        if (num_tc > dpaa2_eth_tc_count(priv)) {
1928                netdev_err(net_dev, "Max %d traffic classes supported\n",
1929                           dpaa2_eth_tc_count(priv));
1930                return -EINVAL;
1931        }
1932
1933        if (!num_tc) {
1934                netdev_reset_tc(net_dev);
1935                netif_set_real_num_tx_queues(net_dev, num_queues);
1936                goto out;
1937        }
1938
1939        netdev_set_num_tc(net_dev, num_tc);
1940        netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
1941
1942        for (i = 0; i < num_tc; i++)
1943                netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
1944
1945out:
1946        update_xps(priv);
1947
1948        return 0;
1949}
1950
1951static const struct net_device_ops dpaa2_eth_ops = {
1952        .ndo_open = dpaa2_eth_open,
1953        .ndo_start_xmit = dpaa2_eth_tx,
1954        .ndo_stop = dpaa2_eth_stop,
1955        .ndo_set_mac_address = dpaa2_eth_set_addr,
1956        .ndo_get_stats64 = dpaa2_eth_get_stats,
1957        .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1958        .ndo_set_features = dpaa2_eth_set_features,
1959        .ndo_do_ioctl = dpaa2_eth_ioctl,
1960        .ndo_change_mtu = dpaa2_eth_change_mtu,
1961        .ndo_bpf = dpaa2_eth_xdp,
1962        .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
1963        .ndo_setup_tc = dpaa2_eth_setup_tc,
1964};
1965
1966static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1967{
1968        struct dpaa2_eth_channel *ch;
1969
1970        ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1971
1972        /* Update NAPI statistics */
1973        ch->stats.cdan++;
1974
1975        napi_schedule_irqoff(&ch->napi);
1976}
1977
1978/* Allocate and configure a DPCON object */
1979static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1980{
1981        struct fsl_mc_device *dpcon;
1982        struct device *dev = priv->net_dev->dev.parent;
1983        struct dpcon_attr attrs;
1984        int err;
1985
1986        err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1987                                     FSL_MC_POOL_DPCON, &dpcon);
1988        if (err) {
1989                if (err == -ENXIO)
1990                        err = -EPROBE_DEFER;
1991                else
1992                        dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1993                return ERR_PTR(err);
1994        }
1995
1996        err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1997        if (err) {
1998                dev_err(dev, "dpcon_open() failed\n");
1999                goto free;
2000        }
2001
2002        err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2003        if (err) {
2004                dev_err(dev, "dpcon_reset() failed\n");
2005                goto close;
2006        }
2007
2008        err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2009        if (err) {
2010                dev_err(dev, "dpcon_get_attributes() failed\n");
2011                goto close;
2012        }
2013
2014        err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2015        if (err) {
2016                dev_err(dev, "dpcon_enable() failed\n");
2017                goto close;
2018        }
2019
2020        return dpcon;
2021
2022close:
2023        dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2024free:
2025        fsl_mc_object_free(dpcon);
2026
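            /* A NULL return is treated by the callers like any other
             * non-EPROBE_DEFER failure: probing continues with however many
             * channels could be set up
             */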
2027        return NULL;
2028}
2029
2030static void free_dpcon(struct dpaa2_eth_priv *priv,
2031                       struct fsl_mc_device *dpcon)
2032{
2033        dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2034        dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2035        fsl_mc_object_free(dpcon);
2036}
2037
2038static struct dpaa2_eth_channel *
2039alloc_channel(struct dpaa2_eth_priv *priv)
2040{
2041        struct dpaa2_eth_channel *channel;
2042        struct dpcon_attr attr;
2043        struct device *dev = priv->net_dev->dev.parent;
2044        int err;
2045
2046        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2047        if (!channel)
2048                return NULL;
2049
2050        channel->dpcon = setup_dpcon(priv);
2051        if (IS_ERR_OR_NULL(channel->dpcon)) {
2052                err = PTR_ERR_OR_ZERO(channel->dpcon);
2053                goto err_setup;
2054        }
2055
2056        err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2057                                   &attr);
2058        if (err) {
2059                dev_err(dev, "dpcon_get_attributes() failed\n");
2060                goto err_get_attr;
2061        }
2062
2063        channel->dpcon_id = attr.id;
2064        channel->ch_id = attr.qbman_ch_id;
2065        channel->priv = priv;
2066
2067        return channel;
2068
2069err_get_attr:
2070        free_dpcon(priv, channel->dpcon);
2071err_setup:
2072        kfree(channel);
2073        return ERR_PTR(err);
2074}
2075
2076static void free_channel(struct dpaa2_eth_priv *priv,
2077                         struct dpaa2_eth_channel *channel)
2078{
2079        free_dpcon(priv, channel->dpcon);
2080        kfree(channel);
2081}
2082
2083/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2084 * and register data availability notifications
2085 */
2086static int setup_dpio(struct dpaa2_eth_priv *priv)
2087{
2088        struct dpaa2_io_notification_ctx *nctx;
2089        struct dpaa2_eth_channel *channel;
2090        struct dpcon_notification_cfg dpcon_notif_cfg;
2091        struct device *dev = priv->net_dev->dev.parent;
2092        int i, err;
2093
2094        /* We want the ability to spread ingress traffic (RX, TX conf) to as
2095         * many cores as possible, so we need one channel for each core
2096         * (unless there are fewer queues than cores, in which case the extra
2097         * channels would be wasted).
2098         * Allocate one channel per core and register it to the core's
2099         * affine DPIO. If not enough channels are available for all cores
2100         * or if some cores don't have an affine DPIO, there will be no
2101         * ingress frame processing on those cores.
2102         */
2103        cpumask_clear(&priv->dpio_cpumask);
2104        for_each_online_cpu(i) {
2105                /* Try to allocate a channel */
2106                channel = alloc_channel(priv);
2107                if (IS_ERR_OR_NULL(channel)) {
2108                        err = PTR_ERR_OR_ZERO(channel);
2109                        if (err != -EPROBE_DEFER)
2110                                dev_info(dev,
2111                                         "No affine channel for cpu %d and above\n", i);
2112                        goto err_alloc_ch;
2113                }
2114
2115                priv->channel[priv->num_channels] = channel;
2116
2117                nctx = &channel->nctx;
2118                nctx->is_cdan = 1;
2119                nctx->cb = cdan_cb;
2120                nctx->id = channel->ch_id;
2121                nctx->desired_cpu = i;
2122
2123                /* Register the new context */
2124                channel->dpio = dpaa2_io_service_select(i);
2125                err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2126                if (err) {
2127                        dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2128                        /* If no affine DPIO for this core, there's probably
2129                         * none available for the remaining cores either. Signal we want
2130                         * to retry later, in case the DPIO devices weren't
2131                         * probed yet.
2132                         */
2133                        err = -EPROBE_DEFER;
2134                        goto err_service_reg;
2135                }
2136
2137                /* Register DPCON notification with MC */
2138                dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2139                dpcon_notif_cfg.priority = 0;
2140                dpcon_notif_cfg.user_ctx = nctx->qman64;
2141                err = dpcon_set_notification(priv->mc_io, 0,
2142                                             channel->dpcon->mc_handle,
2143                                             &dpcon_notif_cfg);
2144                if (err) {
2145                        dev_err(dev, "dpcon_set_notification() failed\n");
2146                        goto err_set_cdan;
2147                }
2148
2149                /* If we managed to allocate a channel and also found an affine
2150                 * DPIO for this core, add it to the final mask
2151                 */
2152                cpumask_set_cpu(i, &priv->dpio_cpumask);
2153                priv->num_channels++;
2154
2155                /* Stop if we already have enough channels to accommodate all
2156                 * RX and TX conf queues
2157                 */
2158                if (priv->num_channels == priv->dpni_attrs.num_queues)
2159                        break;
2160        }
2161
2162        return 0;
2163
2164err_set_cdan:
2165        dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2166err_service_reg:
2167        free_channel(priv, channel);
2168err_alloc_ch:
2169        if (err == -EPROBE_DEFER)
2170                return err;
2171
2172        if (cpumask_empty(&priv->dpio_cpumask)) {
2173                dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2174                return -ENODEV;
2175        }
2176
2177        dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2178                 cpumask_pr_args(&priv->dpio_cpumask));
2179
2180        return 0;
2181}
2182
2183static void free_dpio(struct dpaa2_eth_priv *priv)
2184{
2185        struct device *dev = priv->net_dev->dev.parent;
2186        struct dpaa2_eth_channel *ch;
2187        int i;
2188
2189        /* deregister CDAN notifications and free channels */
2190        for (i = 0; i < priv->num_channels; i++) {
2191                ch = priv->channel[i];
2192                dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2193                free_channel(priv, ch);
2194        }
2195}
2196
2197static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2198                                                    int cpu)
2199{
2200        struct device *dev = priv->net_dev->dev.parent;
2201        int i;
2202
2203        for (i = 0; i < priv->num_channels; i++)
2204                if (priv->channel[i]->nctx.desired_cpu == cpu)
2205                        return priv->channel[i];
2206
2207        /* We should never get here. Issue a warning and return
2208         * the first channel, because it's still better than nothing
2209         */
2210        dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2211
2212        return priv->channel[0];
2213}
2214
2215static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2216{
2217        struct device *dev = priv->net_dev->dev.parent;
2218        struct dpaa2_eth_fq *fq;
2219        int rx_cpu, txc_cpu;
2220        int i;
2221
2222        /* For each FQ, pick one channel/CPU to deliver frames to.
2223         * This may well change at runtime, either through irqbalance or
2224         * through direct user intervention.
2225         */
2226        rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2227
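            /* Rx and Tx confirmation queues are distributed independently,
             * round-robin, across the CPUs that own a channel
             */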
2228        for (i = 0; i < priv->num_fqs; i++) {
2229                fq = &priv->fq[i];
2230                switch (fq->type) {
2231                case DPAA2_RX_FQ:
2232                        fq->target_cpu = rx_cpu;
2233                        rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2234                        if (rx_cpu >= nr_cpu_ids)
2235                                rx_cpu = cpumask_first(&priv->dpio_cpumask);
2236                        break;
2237                case DPAA2_TX_CONF_FQ:
2238                        fq->target_cpu = txc_cpu;
2239                        txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2240                        if (txc_cpu >= nr_cpu_ids)
2241                                txc_cpu = cpumask_first(&priv->dpio_cpumask);
2242                        break;
2243                default:
2244                        dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2245                }
2246                fq->channel = get_affine_channel(priv, fq->target_cpu);
2247        }
2248
2249        update_xps(priv);
2250}
2251
2252static void setup_fqs(struct dpaa2_eth_priv *priv)
2253{
2254        int i;
2255
2256        /* We have one TxConf FQ per Tx flow.
2257         * The number of Tx and Rx queues is the same.
2258         * Tx confirmation queues come first in the fq array.
2259         */
2260        for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2261                priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2262                priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2263                priv->fq[priv->num_fqs++].flowid = (u16)i;
2264        }
2265
2266        for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2267                priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2268                priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2269                priv->fq[priv->num_fqs++].flowid = (u16)i;
2270        }
2271
2272        /* For each FQ, decide on which core to process incoming frames */
2273        set_fq_affinity(priv);
2274}
2275
2276/* Allocate and configure one buffer pool for each interface */
2277static int setup_dpbp(struct dpaa2_eth_priv *priv)
2278{
2279        int err;
2280        struct fsl_mc_device *dpbp_dev;
2281        struct device *dev = priv->net_dev->dev.parent;
2282        struct dpbp_attr dpbp_attrs;
2283
2284        err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2285                                     &dpbp_dev);
2286        if (err) {
2287                if (err == -ENXIO)
2288                        err = -EPROBE_DEFER;
2289                else
2290                        dev_err(dev, "DPBP device allocation failed\n");
2291                return err;
2292        }
2293
2294        priv->dpbp_dev = dpbp_dev;
2295
2296        err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2297                        &dpbp_dev->mc_handle);
2298        if (err) {
2299                dev_err(dev, "dpbp_open() failed\n");
2300                goto err_open;
2301        }
2302
2303        err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2304        if (err) {
2305                dev_err(dev, "dpbp_reset() failed\n");
2306                goto err_reset;
2307        }
2308
2309        err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2310        if (err) {
2311                dev_err(dev, "dpbp_enable() failed\n");
2312                goto err_enable;
2313        }
2314
2315        err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2316                                  &dpbp_attrs);
2317        if (err) {
2318                dev_err(dev, "dpbp_get_attributes() failed\n");
2319                goto err_get_attr;
2320        }
2321        priv->bpid = dpbp_attrs.bpid;
2322
2323        return 0;
2324
2325err_get_attr:
2326        dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2327err_enable:
2328err_reset:
2329        dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2330err_open:
2331        fsl_mc_object_free(dpbp_dev);
2332
2333        return err;
2334}
2335
2336static void free_dpbp(struct dpaa2_eth_priv *priv)
2337{
2338        drain_pool(priv);
2339        dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2340        dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2341        fsl_mc_object_free(priv->dpbp_dev);
2342}
2343
2344static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2345{
2346        struct device *dev = priv->net_dev->dev.parent;
2347        struct dpni_buffer_layout buf_layout = {0};
2348        u16 rx_buf_align;
2349        int err;
2350
2351        /* We need to check for WRIOP version 1.0.0, but depending on the MC
2352         * version, this number is not always reported correctly on rev1
2353         * hardware, so accept both alternatives here.
2354         */
2355        if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2356            priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2357                rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2358        else
2359                rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2360
2361        /* tx buffer */
2362        buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2363        buf_layout.pass_timestamp = true;
2364        buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2365                             DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2366        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2367                                     DPNI_QUEUE_TX, &buf_layout);
2368        if (err) {
2369                dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2370                return err;
2371        }
2372
2373        /* tx-confirm buffer */
2374        buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2375        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2376                                     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2377        if (err) {
2378                dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2379                return err;
2380        }
2381
2382        /* Now that we've set our tx buffer layout, retrieve the minimum
2383         * required tx data offset.
2384         */
2385        err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2386                                      &priv->tx_data_offset);
2387        if (err) {
2388                dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2389                return err;
2390        }
2391
2392        if ((priv->tx_data_offset % 64) != 0)
2393                dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2394                         priv->tx_data_offset);
2395
2396        /* rx buffer */
2397        buf_layout.pass_frame_status = true;
2398        buf_layout.pass_parser_result = true;
2399        buf_layout.data_align = rx_buf_align;
2400        buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2401        buf_layout.private_data_size = 0;
2402        buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2403                             DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2404                             DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2405                             DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2406                             DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2407        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2408                                     DPNI_QUEUE_RX, &buf_layout);
2409        if (err) {
2410                dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2411                return err;
2412        }
2413
2414        return 0;
2415}
2416
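    /* First DPNI API version for which Tx frames are enqueued directly to a
     * frame queue (FQID) instead of going through the queuing destination
     * (QDID); see set_enqueue_mode()
     */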
2417#define DPNI_ENQUEUE_FQID_VER_MAJOR     7
2418#define DPNI_ENQUEUE_FQID_VER_MINOR     9
2419
2420static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2421                                       struct dpaa2_eth_fq *fq,
2422                                       struct dpaa2_fd *fd, u8 prio)
2423{
2424        return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2425                                           priv->tx_qdid, prio,
2426                                           fq->tx_qdbin, fd);
2427}
2428
2429static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
2430                                       struct dpaa2_eth_fq *fq,
2431                                       struct dpaa2_fd *fd, u8 prio)
2432{
2433        return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
2434                                           fq->tx_fqid[prio], fd);
2435}
2436
2437static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2438{
2439        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2440                                   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2441                priv->enqueue = dpaa2_eth_enqueue_qd;
2442        else
2443                priv->enqueue = dpaa2_eth_enqueue_fq;
2444}
2445
2446/* Configure the DPNI object this interface is associated with */
2447static int setup_dpni(struct fsl_mc_device *ls_dev)
2448{
2449        struct device *dev = &ls_dev->dev;
2450        struct dpaa2_eth_priv *priv;
2451        struct net_device *net_dev;
2452        int err;
2453
2454        net_dev = dev_get_drvdata(dev);
2455        priv = netdev_priv(net_dev);
2456
2457        /* get a handle for the DPNI object */
2458        err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
2459        if (err) {
2460                dev_err(dev, "dpni_open() failed\n");
2461                return err;
2462        }
2463
2464        /* Check if we can work with this DPNI object */
2465        err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
2466                                   &priv->dpni_ver_minor);
2467        if (err) {
2468                dev_err(dev, "dpni_get_api_version() failed\n");
2469                goto close;
2470        }
2471        if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
2472                dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
2473                        priv->dpni_ver_major, priv->dpni_ver_minor,
2474                        DPNI_VER_MAJOR, DPNI_VER_MINOR);
2475                err = -ENOTSUPP;
2476                goto close;
2477        }
2478
2479        ls_dev->mc_io = priv->mc_io;
2480        ls_dev->mc_handle = priv->mc_token;
2481
2482        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2483        if (err) {
2484                dev_err(dev, "dpni_reset() failed\n");
2485                goto close;
2486        }
2487
2488        err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
2489                                  &priv->dpni_attrs);
2490        if (err) {
2491                dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
2492                goto close;
2493        }
2494
2495        err = set_buffer_layout(priv);
2496        if (err)
2497                goto close;
2498
2499        set_enqueue_mode(priv);
2500
2501        priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
2502                                       dpaa2_eth_fs_count(priv), GFP_KERNEL);
2503        if (!priv->cls_rules) {
                    err = -ENOMEM;
2504                goto close;
            }
2505
2506        return 0;
2507
2508close:
2509        dpni_close(priv->mc_io, 0, priv->mc_token);
2510
2511        return err;
2512}
2513
2514static void free_dpni(struct dpaa2_eth_priv *priv)
2515{
2516        int err;
2517
2518        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2519        if (err)
2520                netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
2521                            err);
2522
2523        dpni_close(priv->mc_io, 0, priv->mc_token);
2524}
2525
2526static int setup_rx_flow(struct dpaa2_eth_priv *priv,
2527                         struct dpaa2_eth_fq *fq)
2528{
2529        struct device *dev = priv->net_dev->dev.parent;
2530        struct dpni_queue queue;
2531        struct dpni_queue_id qid;
2532        struct dpni_taildrop td;
2533        int err;
2534
2535        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2536                             DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
2537        if (err) {
2538                dev_err(dev, "dpni_get_queue(RX) failed\n");
2539                return err;
2540        }
2541
2542        fq->fqid = qid.fqid;
2543
2544        queue.destination.id = fq->channel->dpcon_id;
2545        queue.destination.type = DPNI_DEST_DPCON;
2546        queue.destination.priority = 1;
2547        queue.user_context = (u64)(uintptr_t)fq;
2548        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2549                             DPNI_QUEUE_RX, 0, fq->flowid,
2550                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2551                             &queue);
2552        if (err) {
2553                dev_err(dev, "dpni_set_queue(RX) failed\n");
2554                return err;
2555        }
2556
2557        td.enable = 1;
2558        td.threshold = DPAA2_ETH_TAILDROP_THRESH;
2559        err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
2560                                DPNI_QUEUE_RX, 0, fq->flowid, &td);
2561        if (err) {
2562                dev_err(dev, "dpni_set_taildrop() failed\n");
2563                return err;
2564        }
2565
2566        /* xdp_rxq setup */
2567        err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
2568                               fq->flowid);
2569        if (err) {
2570                dev_err(dev, "xdp_rxq_info_reg failed\n");
2571                return err;
2572        }
2573
2574        err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
2575                                         MEM_TYPE_PAGE_ORDER0, NULL);
2576        if (err) {
2577                dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
2578                return err;
2579        }
2580
2581        return 0;
2582}
2583
2584static int setup_tx_flow(struct dpaa2_eth_priv *priv,
2585                         struct dpaa2_eth_fq *fq)
2586{
2587        struct device *dev = priv->net_dev->dev.parent;
2588        struct dpni_queue queue;
2589        struct dpni_queue_id qid;
2590        int i, err;
2591
2592        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2593                err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2594                                     DPNI_QUEUE_TX, i, fq->flowid,
2595                                     &queue, &qid);
2596                if (err) {
2597                        dev_err(dev, "dpni_get_queue(TX) failed\n");
2598                        return err;
2599                }
2600                fq->tx_fqid[i] = qid.fqid;
2601        }
2602
2603        /* All Tx queues belonging to the same flowid have the same qdbin */
2604        fq->tx_qdbin = qid.qdbin;
2605
2606        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2607                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2608                             &queue, &qid);
2609        if (err) {
2610                dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2611                return err;
2612        }
2613
2614        fq->fqid = qid.fqid;
2615
2616        queue.destination.id = fq->channel->dpcon_id;
2617        queue.destination.type = DPNI_DEST_DPCON;
2618        queue.destination.priority = 0;
2619        queue.user_context = (u64)(uintptr_t)fq;
2620        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2621                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2622                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2623                             &queue);
2624        if (err) {
2625                dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2626                return err;
2627        }
2628
2629        return 0;
2630}
2631
2632/* Supported header fields for Rx hash distribution key */
2633static const struct dpaa2_eth_dist_fields dist_fields[] = {
2634        {
2635                /* L2 header */
2636                .rxnfc_field = RXH_L2DA,
2637                .cls_prot = NET_PROT_ETH,
2638                .cls_field = NH_FLD_ETH_DA,
2639                .id = DPAA2_ETH_DIST_ETHDST,
2640                .size = 6,
2641        }, {
2642                .cls_prot = NET_PROT_ETH,
2643                .cls_field = NH_FLD_ETH_SA,
2644                .id = DPAA2_ETH_DIST_ETHSRC,
2645                .size = 6,
2646        }, {
2647                /* This is the last ethertype field parsed:
2648                 * depending on frame format, it can be the MAC ethertype
2649                 * or the VLAN etype.
2650                 */
2651                .cls_prot = NET_PROT_ETH,
2652                .cls_field = NH_FLD_ETH_TYPE,
2653                .id = DPAA2_ETH_DIST_ETHTYPE,
2654                .size = 2,
2655        }, {
2656                /* VLAN header */
2657                .rxnfc_field = RXH_VLAN,
2658                .cls_prot = NET_PROT_VLAN,
2659                .cls_field = NH_FLD_VLAN_TCI,
2660                .id = DPAA2_ETH_DIST_VLAN,
2661                .size = 2,
2662        }, {
2663                /* IP header */
2664                .rxnfc_field = RXH_IP_SRC,
2665                .cls_prot = NET_PROT_IP,
2666                .cls_field = NH_FLD_IP_SRC,
2667                .id = DPAA2_ETH_DIST_IPSRC,
2668                .size = 4,
2669        }, {
2670                .rxnfc_field = RXH_IP_DST,
2671                .cls_prot = NET_PROT_IP,
2672                .cls_field = NH_FLD_IP_DST,
2673                .id = DPAA2_ETH_DIST_IPDST,
2674                .size = 4,
2675        }, {
2676                .rxnfc_field = RXH_L3_PROTO,
2677                .cls_prot = NET_PROT_IP,
2678                .cls_field = NH_FLD_IP_PROTO,
2679                .id = DPAA2_ETH_DIST_IPPROTO,
2680                .size = 1,
2681        }, {
2682                /* Using UDP ports, this is functionally equivalent to raw
2683                 * byte pairs from L4 header.
2684                 */
2685                .rxnfc_field = RXH_L4_B_0_1,
2686                .cls_prot = NET_PROT_UDP,
2687                .cls_field = NH_FLD_UDP_PORT_SRC,
2688                .id = DPAA2_ETH_DIST_L4SRC,
2689                .size = 2,
2690        }, {
2691                .rxnfc_field = RXH_L4_B_2_3,
2692                .cls_prot = NET_PROT_UDP,
2693                .cls_field = NH_FLD_UDP_PORT_DST,
2694                .id = DPAA2_ETH_DIST_L4DST,
2695                .size = 2,
2696        },
2697};
2698
2699/* Configure the Rx hash key using the legacy API */
2700static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2701{
2702        struct device *dev = priv->net_dev->dev.parent;
2703        struct dpni_rx_tc_dist_cfg dist_cfg;
2704        int err;
2705
2706        memset(&dist_cfg, 0, sizeof(dist_cfg));
2707
2708        dist_cfg.key_cfg_iova = key;
2709        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2710        dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2711
2712        err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2713        if (err)
2714                dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2715
2716        return err;
2717}
2718
2719/* Configure the Rx hash key using the new API */
2720static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2721{
2722        struct device *dev = priv->net_dev->dev.parent;
2723        struct dpni_rx_dist_cfg dist_cfg;
2724        int err;
2725
2726        memset(&dist_cfg, 0, sizeof(dist_cfg));
2727
2728        dist_cfg.key_cfg_iova = key;
2729        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2730        dist_cfg.enable = 1;
2731
2732        err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2733        if (err)
2734                dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2735
2736        return err;
2737}
2738
2739/* Configure the Rx flow classification key */
2740static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2741{
2742        struct device *dev = priv->net_dev->dev.parent;
2743        struct dpni_rx_dist_cfg dist_cfg;
2744        int err;
2745
2746        memset(&dist_cfg, 0, sizeof(dist_cfg));
2747
2748        dist_cfg.key_cfg_iova = key;
2749        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2750        dist_cfg.enable = 1;
2751
2752        err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2753        if (err)
2754                dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2755
2756        return err;
2757}
2758
2759/* Size of the Rx flow classification key */
2760int dpaa2_eth_cls_key_size(u64 fields)
2761{
2762        int i, size = 0;
2763
2764        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2765                if (!(fields & dist_fields[i].id))
2766                        continue;
2767                size += dist_fields[i].size;
2768        }
2769
2770        return size;
2771}
2772
2773/* Offset of header field in Rx classification key */
2774int dpaa2_eth_cls_fld_off(int prot, int field)
2775{
2776        int i, off = 0;
2777
2778        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2779                if (dist_fields[i].cls_prot == prot &&
2780                    dist_fields[i].cls_field == field)
2781                        return off;
2782                off += dist_fields[i].size;
2783        }
2784
2785        WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2786        return 0;
2787}
2788
2789/* Prune unused fields from the classification rule.
2790 * Used when masking is not supported
2791 */
2792void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
2793{
2794        int off = 0, new_off = 0;
2795        int i, size;
2796
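            /* Compact the selected fields towards the start of the key,
             * dropping the gaps left by the unselected ones
             */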
2797        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2798                size = dist_fields[i].size;
2799                if (dist_fields[i].id & fields) {
2800                        memcpy(key_mem + new_off, key_mem + off, size);
2801                        new_off += size;
2802                }
2803                off += size;
2804        }
2805}
2806
2807/* Set Rx distribution (hash or flow classification) key
2808 * flags is a combination of RXH_ bits
2809 */
2810static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2811                                  enum dpaa2_eth_rx_dist type, u64 flags)
2812{
2813        struct device *dev = net_dev->dev.parent;
2814        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2815        struct dpkg_profile_cfg cls_cfg;
2816        u32 rx_hash_fields = 0;
2817        dma_addr_t key_iova;
2818        u8 *dma_mem;
2819        int i;
2820        int err = 0;
2821
2822        memset(&cls_cfg, 0, sizeof(cls_cfg));
2823
2824        for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2825                struct dpkg_extract *key =
2826                        &cls_cfg.extracts[cls_cfg.num_extracts];
2827
2828                /* For both Rx hashing and classification keys
2829                 * we set only the selected fields.
2830                 */
2831                if (!(flags & dist_fields[i].id))
2832                        continue;
2833                if (type == DPAA2_ETH_RX_DIST_HASH)
2834                        rx_hash_fields |= dist_fields[i].rxnfc_field;
2835
2836                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2837                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
2838                        return -E2BIG;
2839                }
2840
2841                key->type = DPKG_EXTRACT_FROM_HDR;
2842                key->extract.from_hdr.prot = dist_fields[i].cls_prot;
2843                key->extract.from_hdr.type = DPKG_FULL_FIELD;
2844                key->extract.from_hdr.field = dist_fields[i].cls_field;
2845                cls_cfg.num_extracts++;
2846        }
2847
2848        dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2849        if (!dma_mem)
2850                return -ENOMEM;
2851
2852        err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2853        if (err) {
2854                dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2855                goto free_key;
2856        }
2857
2858        /* Prepare for setting the rx dist */
2859        key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
2860                                  DMA_TO_DEVICE);
2861        if (dma_mapping_error(dev, key_iova)) {
2862                dev_err(dev, "DMA mapping failed\n");
2863                err = -ENOMEM;
2864                goto free_key;
2865        }
2866
2867        if (type == DPAA2_ETH_RX_DIST_HASH) {
2868                if (dpaa2_eth_has_legacy_dist(priv))
2869                        err = config_legacy_hash_key(priv, key_iova);
2870                else
2871                        err = config_hash_key(priv, key_iova);
2872        } else {
2873                err = config_cls_key(priv, key_iova);
2874        }
2875
2876        dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
2877                         DMA_TO_DEVICE);
2878        if (!err && type == DPAA2_ETH_RX_DIST_HASH)
2879                priv->rx_hash_fields = rx_hash_fields;
2880
2881free_key:
2882        kfree(dma_mem);
2883        return err;
2884}
2885
2886int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2887{
2888        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2889        u64 key = 0;
2890        int i;
2891
2892        if (!dpaa2_eth_hash_enabled(priv))
2893                return -EOPNOTSUPP;
2894
2895        for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
2896                if (dist_fields[i].rxnfc_field & flags)
2897                        key |= dist_fields[i].id;
2898
2899        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
2900}
2901
2902int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
2903{
2904        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
2905}
2906
2907static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
2908{
2909        struct device *dev = priv->net_dev->dev.parent;
2910        int err;
2911
2912        /* Check if we actually support Rx flow classification */
2913        if (dpaa2_eth_has_legacy_dist(priv)) {
2914                dev_dbg(dev, "Rx cls not supported by current MC version\n");
2915                return -EOPNOTSUPP;
2916        }
2917
2918        if (!dpaa2_eth_fs_enabled(priv)) {
2919                dev_dbg(dev, "Rx cls disabled in DPNI options\n");
2920                return -EOPNOTSUPP;
2921        }
2922
2923        if (!dpaa2_eth_hash_enabled(priv)) {
2924                dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
2925                return -EOPNOTSUPP;
2926        }
2927
2928        /* If there is no support for masking in the classification table,
2929         * we don't set a default key, as it will depend on the rules
2930         * added by the user at runtime.
2931         */
2932        if (!dpaa2_eth_fs_mask_enabled(priv))
2933                goto out;
2934
2935        err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
2936        if (err)
2937                return err;
2938
2939out:
2940        priv->rx_cls_enabled = 1;
2941
2942        return 0;
2943}
2944
2945/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2946 * frame queues and channels
2947 */
2948static int bind_dpni(struct dpaa2_eth_priv *priv)
2949{
2950        struct net_device *net_dev = priv->net_dev;
2951        struct device *dev = net_dev->dev.parent;
2952        struct dpni_pools_cfg pools_params;
2953        struct dpni_error_cfg err_cfg;
2954        int err = 0;
2955        int i;
2956
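            /* All Rx queues of the interface share a single buffer pool */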
2957        pools_params.num_dpbp = 1;
2958        pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2959        pools_params.pools[0].backup_pool = 0;
2960        pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2961        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2962        if (err) {
2963                dev_err(dev, "dpni_set_pools() failed\n");
2964                return err;
2965        }
2966
2967        /* have the interface implicitly distribute traffic based on
2968         * the default hash key
2969         */
2970        err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
2971        if (err && err != -EOPNOTSUPP)
2972                dev_err(dev, "Failed to configure hashing\n");
2973
2974        /* Configure the flow classification key; it includes all
2975         * supported header fields and cannot be modified at runtime
2976         */
2977        err = dpaa2_eth_set_default_cls(priv);
2978        if (err && err != -EOPNOTSUPP)
2979                dev_err(dev, "Failed to configure Rx classification key\n");
2980
2981        /* Configure handling of error frames */
2982        err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2983        err_cfg.set_frame_annotation = 1;
2984        err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2985        err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2986                                       &err_cfg);
2987        if (err) {
2988                dev_err(dev, "dpni_set_errors_behavior failed\n");
2989                return err;
2990        }
2991
2992        /* Configure Rx and Tx conf queues to generate CDANs */
2993        for (i = 0; i < priv->num_fqs; i++) {
2994                switch (priv->fq[i].type) {
2995                case DPAA2_RX_FQ:
2996                        err = setup_rx_flow(priv, &priv->fq[i]);
2997                        break;
2998                case DPAA2_TX_CONF_FQ:
2999                        err = setup_tx_flow(priv, &priv->fq[i]);
3000                        break;
3001                default:
3002                        dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3003                        return -EINVAL;
3004                }
3005                if (err)
3006                        return err;
3007        }
3008
3009        err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3010                            DPNI_QUEUE_TX, &priv->tx_qdid);
3011        if (err) {
3012                dev_err(dev, "dpni_get_qdid() failed\n");
3013                return err;
3014        }
3015
3016        return 0;
3017}
3018
3019/* Allocate rings for storing incoming frame descriptors */
3020static int alloc_rings(struct dpaa2_eth_priv *priv)
3021{
3022        struct net_device *net_dev = priv->net_dev;
3023        struct device *dev = net_dev->dev.parent;
3024        int i;
3025
3026        for (i = 0; i < priv->num_channels; i++) {
3027                priv->channel[i]->store =
3028                        dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3029                if (!priv->channel[i]->store) {
3030                        netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3031                        goto err_ring;
3032                }
3033        }
3034
3035        return 0;
3036
3037err_ring:
3038        for (i = 0; i < priv->num_channels; i++) {
3039                if (!priv->channel[i]->store)
3040                        break;
3041                dpaa2_io_store_destroy(priv->channel[i]->store);
3042        }
3043
3044        return -ENOMEM;
3045}
3046
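    /* Free the frame descriptor stores allocated in alloc_rings() */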
3047static void free_rings(struct dpaa2_eth_priv *priv)
3048{
3049        int i;
3050
3051        for (i = 0; i < priv->num_channels; i++)
3052                dpaa2_io_store_destroy(priv->channel[i]->store);
3053}
3054
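    /* Select the MAC address for the net device: prefer the address configured
     * on the physical port by the bootloader, then the one already set on the
     * DPNI, and fall back to a randomly generated address
     */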
3055static int set_mac_addr(struct dpaa2_eth_priv *priv)
3056{
3057        struct net_device *net_dev = priv->net_dev;
3058        struct device *dev = net_dev->dev.parent;
3059        u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3060        int err;
3061
3062        /* Get firmware address, if any */
3063        err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3064        if (err) {
3065                dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3066                return err;
3067        }
3068
3069        /* Get the MAC address currently configured on the DPNI, if any */
3070        err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3071                                        dpni_mac_addr);
3072        if (err) {
3073                dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
3074                return err;
3075        }
3076
3077        /* First check if firmware has any address configured by bootloader */
3078        if (!is_zero_ether_addr(mac_addr)) {
3079                /* If the DPMAC addr != DPNI addr, update it */
3080                if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3081                        err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3082                                                        priv->mc_token,
3083                                                        mac_addr);
3084                        if (err) {
3085                                dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3086                                return err;
3087                        }
3088                }
3089                memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3090        } else if (is_zero_ether_addr(dpni_mac_addr)) {
3091                /* No MAC address configured, fill in net_dev->dev_addr
3092                 * with a random one
3093                 */
3094                eth_hw_addr_random(net_dev);
3095                dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3096
3097                err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3098                                                net_dev->dev_addr);
3099                if (err) {
3100                        dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3101                        return err;
3102                }
3103
3104                /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3105                 * practical purposes, this will be our "permanent" MAC address,
3106                 * at least until the next reboot. This also lets
3107                 * register_netdevice() properly fill in net_dev->perm_addr.
3108                 */
3109                net_dev->addr_assign_type = NET_ADDR_PERM;
3110        } else {
3111                /* NET_ADDR_PERM is the default; all we have to do is
3112                 * fill in the device address.
3113                 */
3114                memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3115        }
3116
3117        return 0;
3118}
3119
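    /* Net device initialization: ops, MAC and broadcast addresses, maximum
     * frame length, queue counts, priv_flags and offload features
     */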
3120static int netdev_init(struct net_device *net_dev)
3121{
3122        struct device *dev = net_dev->dev.parent;
3123        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3124        u32 options = priv->dpni_attrs.options;
3125        u64 supported = 0, not_supported = 0;
3126        u8 bcast_addr[ETH_ALEN];
3127        u8 num_queues;
3128        int err;
3129
3130        net_dev->netdev_ops = &dpaa2_eth_ops;
3131        net_dev->ethtool_ops = &dpaa2_ethtool_ops;
3132
3133        err = set_mac_addr(priv);
3134        if (err)
3135                return err;
3136
3137        /* Explicitly add the broadcast address to the MAC filtering table */
3138        eth_broadcast_addr(bcast_addr);
3139        err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3140        if (err) {
3141                dev_err(dev, "dpni_add_mac_addr() failed\n");
3142                return err;
3143        }
3144
3145        /* Set MTU upper limit; lower limit is 68B (default value) */
3146        net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3147        err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3148                                        DPAA2_ETH_MFL);
3149        if (err) {
3150                dev_err(dev, "dpni_set_max_frame_length() failed\n");
3151                return err;
3152        }
3153
3154        /* Set actual number of queues in the net device */
3155        num_queues = dpaa2_eth_queue_count(priv);
3156        err = netif_set_real_num_tx_queues(net_dev, num_queues);
3157        if (err) {
3158                dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
3159                return err;
3160        }
3161        err = netif_set_real_num_rx_queues(net_dev, num_queues);
3162        if (err) {
3163                dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
3164                return err;
3165        }
3166
3167        /* Capabilities listing */
3168        supported |= IFF_LIVE_ADDR_CHANGE;
3169
3170        if (options & DPNI_OPT_NO_MAC_FILTER)
3171                not_supported |= IFF_UNICAST_FLT;
3172        else
3173                supported |= IFF_UNICAST_FLT;
3174
3175        net_dev->priv_flags |= supported;
3176        net_dev->priv_flags &= ~not_supported;
3177
3178        /* Features */
3179        net_dev->features = NETIF_F_RXCSUM |
3180                            NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3181                            NETIF_F_SG | NETIF_F_HIGHDMA |
3182                            NETIF_F_LLTX;
3183        net_dev->hw_features = net_dev->features;
3184
3185        return 0;
3186}
3187
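    /* Link state polling thread, used as a fallback when the DPNI link change
     * interrupt could not be set up
     */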
3188static int poll_link_state(void *arg)
3189{
3190        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3191        int err;
3192
3193        while (!kthread_should_stop()) {
3194                err = link_state_update(priv);
3195                if (unlikely(err))
3196                        return err;
3197
3198                msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3199        }
3200
3201        return 0;
3202}
3203
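    /* Threaded handler for the DPNI interrupt: read the IRQ status and refresh
     * the link state if a link change event was signaled
     */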
3204static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3205{
3206        u32 status = ~0;
3207        struct device *dev = (struct device *)arg;
3208        struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3209        struct net_device *net_dev = dev_get_drvdata(dev);
3210        int err;
3211
3212        err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3213                                  DPNI_IRQ_INDEX, &status);
3214        if (unlikely(err)) {
3215                netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
3216                return IRQ_HANDLED;
3217        }
3218
3219        if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
3220                link_state_update(netdev_priv(net_dev));
3221
3222        return IRQ_HANDLED;
3223}
3224
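    /* Allocate the MC interrupts and enable the DPNI link change interrupt */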
3225static int setup_irqs(struct fsl_mc_device *ls_dev)
3226{
3227        int err = 0;
3228        struct fsl_mc_device_irq *irq;
3229
3230        err = fsl_mc_allocate_irqs(ls_dev);
3231        if (err) {
3232                dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3233                return err;
3234        }
3235
3236        irq = ls_dev->irqs[0];
3237        err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3238                                        NULL, dpni_irq0_handler_thread,
3239                                        IRQF_NO_SUSPEND | IRQF_ONESHOT,
3240                                        dev_name(&ls_dev->dev), &ls_dev->dev);
3241        if (err < 0) {
3242                dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
3243                goto free_mc_irq;
3244        }
3245
3246        err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3247                                DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
3248        if (err < 0) {
3249                dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
3250                goto free_irq;
3251        }
3252
3253        err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3254                                  DPNI_IRQ_INDEX, 1);
3255        if (err < 0) {
3256                dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
3257                goto free_irq;
3258        }
3259
3260        return 0;
3261
3262free_irq:
3263        devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3264free_mc_irq:
3265        fsl_mc_free_irqs(ls_dev);
3266
3267        return err;
3268}
3269
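    /* Register one NAPI instance per channel */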
3270static void add_ch_napi(struct dpaa2_eth_priv *priv)
3271{
3272        int i;
3273        struct dpaa2_eth_channel *ch;
3274
3275        for (i = 0; i < priv->num_channels; i++) {
3276                ch = priv->channel[i];
3277                /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3278                netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3279                               NAPI_POLL_WEIGHT);
3280        }
3281}
3282
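    /* Unregister the per-channel NAPI instances */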
3283static void del_ch_napi(struct dpaa2_eth_priv *priv)
3284{
3285        int i;
3286        struct dpaa2_eth_channel *ch;
3287
3288        for (i = 0; i < priv->num_channels; i++) {
3289                ch = priv->channel[i];
3290                netif_napi_del(&ch->napi);
3291        }
3292}
3293
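    /* Probe a DPNI object: allocate the net device and MC portal, set up and
     * bind the MC objects (DPNI, DPIO, DPBP), allocate per-cpu statistics and
     * frame descriptor stores, set up link state notifications and register
     * the net device
     */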
3294static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3295{
3296        struct device *dev;
3297        struct net_device *net_dev = NULL;
3298        struct dpaa2_eth_priv *priv = NULL;
3299        int err = 0;
3300
3301        dev = &dpni_dev->dev;
3302
3303        /* Net device */
3304        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
3305        if (!net_dev) {
3306                dev_err(dev, "alloc_etherdev_mq() failed\n");
3307                return -ENOMEM;
3308        }
3309
3310        SET_NETDEV_DEV(net_dev, dev);
3311        dev_set_drvdata(dev, net_dev);
3312
3313        priv = netdev_priv(net_dev);
3314        priv->net_dev = net_dev;
3315
3316        priv->iommu_domain = iommu_get_domain_for_dev(dev);
3317
3318        /* Obtain a MC portal */
3319        err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3320                                     &priv->mc_io);
3321        if (err) {
3322                if (err == -ENXIO)
3323                        err = -EPROBE_DEFER;
3324                else
3325                        dev_err(dev, "MC portal allocation failed\n");
3326                goto err_portal_alloc;
3327        }
3328
3329        /* MC objects initialization and configuration */
3330        err = setup_dpni(dpni_dev);
3331        if (err)
3332                goto err_dpni_setup;
3333
3334        err = setup_dpio(priv);
3335        if (err)
3336                goto err_dpio_setup;
3337
3338        setup_fqs(priv);
3339
3340        err = setup_dpbp(priv);
3341        if (err)
3342                goto err_dpbp_setup;
3343
3344        err = bind_dpni(priv);
3345        if (err)
3346                goto err_bind;
3347
3348        /* Add a NAPI context for each channel */
3349        add_ch_napi(priv);
3350
3351        /* Percpu statistics */
3352        priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
3353        if (!priv->percpu_stats) {
3354                dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
3355                err = -ENOMEM;
3356                goto err_alloc_percpu_stats;
3357        }
3358        priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
3359        if (!priv->percpu_extras) {
3360                dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
3361                err = -ENOMEM;
3362                goto err_alloc_percpu_extras;
3363        }
3364
3365        err = netdev_init(net_dev);
3366        if (err)
3367                goto err_netdev_init;
3368
3369        /* Configure checksum offload based on current interface flags */
3370        err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
3371        if (err)
3372                goto err_csum;
3373
3374        err = set_tx_csum(priv, !!(net_dev->features &
3375                                   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
3376        if (err)
3377                goto err_csum;
3378
3379        err = alloc_rings(priv);
3380        if (err)
3381                goto err_alloc_rings;
3382
3383        err = setup_irqs(dpni_dev);
3384        if (err) {
3385                netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
3386                priv->poll_thread = kthread_run(poll_link_state, priv,
3387                                                "%s_poll_link", net_dev->name);
3388                if (IS_ERR(priv->poll_thread)) {
3389                        dev_err(dev, "Error starting polling thread\n");
3390                        goto err_poll_thread;
3391                }
3392                priv->do_link_poll = true;
3393        }
3394
3395        err = register_netdev(net_dev);
3396        if (err < 0) {
3397                dev_err(dev, "register_netdev() failed\n");
3398                goto err_netdev_reg;
3399        }
3400
3401#ifdef CONFIG_DEBUG_FS
3402        dpaa2_dbg_add(priv);
3403#endif
3404
3405        dev_info(dev, "Probed interface %s\n", net_dev->name);
3406        return 0;
3407
3408err_netdev_reg:
3409        if (priv->do_link_poll)
3410                kthread_stop(priv->poll_thread);
3411        else
3412                fsl_mc_free_irqs(dpni_dev);
3413err_poll_thread:
3414        free_rings(priv);
3415err_alloc_rings:
3416err_csum:
3417err_netdev_init:
3418        free_percpu(priv->percpu_extras);
3419err_alloc_percpu_extras:
3420        free_percpu(priv->percpu_stats);
3421err_alloc_percpu_stats:
3422        del_ch_napi(priv);
3423err_bind:
3424        free_dpbp(priv);
3425err_dpbp_setup:
3426        free_dpio(priv);
3427err_dpio_setup:
3428        free_dpni(priv);
3429err_dpni_setup:
3430        fsl_mc_portal_free(priv->mc_io);
3431err_portal_alloc:
3432        dev_set_drvdata(dev, NULL);
3433        free_netdev(net_dev);
3434
3435        return err;
3436}
3437
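    /* Tear down the interface: undo, in reverse order, everything that was
     * set up in dpaa2_eth_probe()
     */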
3438static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
3439{
3440        struct device *dev;
3441        struct net_device *net_dev;
3442        struct dpaa2_eth_priv *priv;
3443
3444        dev = &ls_dev->dev;
3445        net_dev = dev_get_drvdata(dev);
3446        priv = netdev_priv(net_dev);
3447
3448#ifdef CONFIG_DEBUG_FS
3449        dpaa2_dbg_remove(priv);
3450#endif
3451        unregister_netdev(net_dev);
3452
3453        if (priv->do_link_poll)
3454                kthread_stop(priv->poll_thread);
3455        else
3456                fsl_mc_free_irqs(ls_dev);
3457
3458        free_rings(priv);
3459        free_percpu(priv->percpu_stats);
3460        free_percpu(priv->percpu_extras);
3461
3462        del_ch_napi(priv);
3463        free_dpbp(priv);
3464        free_dpio(priv);
3465        free_dpni(priv);
3466
3467        fsl_mc_portal_free(priv->mc_io);
3468
3469        free_netdev(net_dev);
3470
3471        dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
3472
3473        return 0;
3474}
3475
3476static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
3477        {
3478                .vendor = FSL_MC_VENDOR_FREESCALE,
3479                .obj_type = "dpni",
3480        },
3481        { .vendor = 0x0 }
3482};
3483MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
3484
3485static struct fsl_mc_driver dpaa2_eth_driver = {
3486        .driver = {
3487                .name = KBUILD_MODNAME,
3488                .owner = THIS_MODULE,
3489        },
3490        .probe = dpaa2_eth_probe,
3491        .remove = dpaa2_eth_remove,
3492        .match_id_table = dpaa2_eth_match_id_table
3493};
3494
3495static int __init dpaa2_eth_driver_init(void)
3496{
3497        int err;
3498
3499        dpaa2_eth_dbg_init();
3500        err = fsl_mc_driver_register(&dpaa2_eth_driver);
3501        if (err) {
3502                dpaa2_eth_dbg_exit();
3503                return err;
3504        }
3505
3506        return 0;
3507}
3508
3509static void __exit dpaa2_eth_driver_exit(void)
3510{
3511        dpaa2_eth_dbg_exit();
3512        fsl_mc_driver_unregister(&dpaa2_eth_driver);
3513}
3514
3515module_init(dpaa2_eth_driver_init);
3516module_exit(dpaa2_eth_driver_exit);
3517