linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
   1/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
   2 * Copyright 2020 NXP
   3 *
   4 * Redistribution and use in source and binary forms, with or without
   5 * modification, are permitted provided that the following conditions are met:
   6 *     * Redistributions of source code must retain the above copyright
   7 *       notice, this list of conditions and the following disclaimer.
   8 *     * Redistributions in binary form must reproduce the above copyright
   9 *       notice, this list of conditions and the following disclaimer in the
  10 *       documentation and/or other materials provided with the distribution.
  11 *     * Neither the name of Freescale Semiconductor nor the
  12 *       names of its contributors may be used to endorse or promote products
  13 *       derived from this software without specific prior written permission.
  14 *
  15 * ALTERNATIVELY, this software may be distributed under the terms of the
  16 * GNU General Public License ("GPL") as published by the Free Software
  17 * Foundation, either version 2 of that License or (at your option) any
  18 * later version.
  19 *
  20 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  23 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/init.h>
  35#include <linux/module.h>
  36#include <linux/of_platform.h>
  37#include <linux/of_mdio.h>
  38#include <linux/of_net.h>
  39#include <linux/io.h>
  40#include <linux/if_arp.h>
  41#include <linux/if_vlan.h>
  42#include <linux/icmp.h>
  43#include <linux/ip.h>
  44#include <linux/ipv6.h>
  45#include <linux/udp.h>
  46#include <linux/tcp.h>
  47#include <linux/net.h>
  48#include <linux/skbuff.h>
  49#include <linux/etherdevice.h>
  50#include <linux/if_ether.h>
  51#include <linux/highmem.h>
  52#include <linux/percpu.h>
  53#include <linux/dma-mapping.h>
  54#include <linux/sort.h>
  55#include <linux/phy_fixed.h>
  56#include <linux/bpf.h>
  57#include <linux/bpf_trace.h>
  58#include <soc/fsl/bman.h>
  59#include <soc/fsl/qman.h>
  60#include "fman.h"
  61#include "fman_port.h"
  62#include "mac.h"
  63#include "dpaa_eth.h"
  64
  65/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
  66 * using trace events only need to #include <trace/events/sched.h>
  67 */
  68#define CREATE_TRACE_POINTS
  69#include "dpaa_eth_trace.h"
  70
  71static int debug = -1;
  72module_param(debug, int, 0444);
  73MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
  74
  75static u16 tx_timeout = 1000;
  76module_param(tx_timeout, ushort, 0444);
  77MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
  78
  79#define FM_FD_STAT_RX_ERRORS                                            \
  80        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL     | \
  81         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
  82         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME     | \
  83         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
  84         FM_FD_ERR_PRS_HDR_ERR)
  85
  86#define FM_FD_STAT_TX_ERRORS \
  87        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
  88         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
  89
  90#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
  91                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
  92                          NETIF_MSG_IFDOWN | NETIF_MSG_HW)
  93
  94#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
  95/* Ingress congestion threshold on FMan ports
  96 * The size in bytes of the ingress tail-drop threshold on FMan ports.
  97 * Traffic piling up above this value will be rejected by QMan and discarded
  98 * by FMan.
  99 */
 100
 101/* Size in bytes of the FQ taildrop threshold */
 102#define DPAA_FQ_TD 0x200000
 103
 104#define DPAA_CS_THRESHOLD_1G 0x06000000
 105/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 106 * The size in bytes of the egress Congestion State notification threshold on
 107 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 108 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 109 * and the larger the frame size, the more acute the problem.
 110 * So we have to find a balance between these factors:
 111 * - avoiding the device staying congested for a prolonged time (risking
 112 *   that the netdev watchdog fires - see also the tx_timeout module param);
 113 * - affecting performance of protocols such as TCP, which otherwise
 114 *   behave well under the congestion notification mechanism;
 115 * - preventing the Tx cores from tightly-looping (as if the congestion
 116 *   threshold was too low to be effective);
 117 * - running out of memory if the CS threshold is set too high.
 118 */
 119
 120#define DPAA_CS_THRESHOLD_10G 0x10000000
 121/* The size in bytes of the egress Congestion State notification threshold on
 122 * 10G ports, range 0x1000 .. 0x10000000
 123 */
 124
 125/* Largest value that the FQD's OAL field can hold */
 126#define FSL_QMAN_MAX_OAL        127
 127
 128/* Default alignment for start of data in an Rx FD */
 129#ifdef CONFIG_DPAA_ERRATUM_A050385
 130/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
 131 * is crossing a 4k page boundary
 132 */
 133#define DPAA_FD_DATA_ALIGNMENT  (fman_has_errata_a050385() ? 64 : 16)
 134/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
 135 * crossings; also, all SG fragments except the last must have a size multiple
 136 * of 256 to avoid DMA transaction splits
 137 */
 138#define DPAA_A050385_ALIGN 256
 139#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
 140                                   DPAA_A050385_ALIGN : 16)
 141#else
 142#define DPAA_FD_DATA_ALIGNMENT  16
 143#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
 144#endif
 145
 146/* The DPAA requires 256 bytes reserved and mapped for the SGT */
 147#define DPAA_SGT_SIZE 256
 148
 149/* Values for the L3R field of the FM Parse Results
 150 */
 151/* L3 Type field: First IP Present IPv4 */
 152#define FM_L3_PARSE_RESULT_IPV4 0x8000
 153/* L3 Type field: First IP Present IPv6 */
 154#define FM_L3_PARSE_RESULT_IPV6 0x4000
 155/* Values for the L4R field of the FM Parse Results */
 156/* L4 Type field: UDP */
 157#define FM_L4_PARSE_RESULT_UDP  0x40
 158/* L4 Type field: TCP */
 159#define FM_L4_PARSE_RESULT_TCP  0x20
 160
 161/* FD status field indicating whether the FM Parser has attempted to validate
 162 * the L4 csum of the frame.
 163 * Note that having this bit set doesn't necessarily imply that the checksum
 164 * is valid. One would have to check the parse results to find that out.
 165 */
 166#define FM_FD_STAT_L4CV         0x00000004
 167
 168#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
 169#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
 170
 171#define FSL_DPAA_BPID_INV               0xff
 172#define FSL_DPAA_ETH_MAX_BUF_COUNT      128
 173#define FSL_DPAA_ETH_REFILL_THRESHOLD   80
 174
 175#define DPAA_TX_PRIV_DATA_SIZE  16
 176#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
 177#define DPAA_TIME_STAMP_SIZE 8
 178#define DPAA_HASH_RESULTS_SIZE 8
 179#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
 180                       + DPAA_HASH_RESULTS_SIZE)
 181#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
 182                                        XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
 183#ifdef CONFIG_DPAA_ERRATUM_A050385
 184#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
 185#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
 186                                DPAA_RX_PRIV_DATA_A050385_SIZE : \
 187                                DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
 188#else
 189#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
 190#endif
 191
 192#define DPAA_ETH_PCD_RXQ_NUM    128
 193
 194#define DPAA_ENQUEUE_RETRIES    100000
 195
 196enum port_type {RX, TX};
 197
 198struct fm_port_fqs {
 199        struct dpaa_fq *tx_defq;
 200        struct dpaa_fq *tx_errq;
 201        struct dpaa_fq *rx_defq;
 202        struct dpaa_fq *rx_errq;
 203        struct dpaa_fq *rx_pcdq;
 204};
 205
 206/* All the dpa bps in use at any moment */
 207static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 208
 209#define DPAA_BP_RAW_SIZE 4096
 210
 211#ifdef CONFIG_DPAA_ERRATUM_A050385
 212#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
 213                                ~(DPAA_A050385_ALIGN - 1))
 214#else
 215#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 216#endif
 217
 218static int dpaa_max_frm;
 219
 220static int dpaa_rx_extra_headroom;
 221
 222#define dpaa_get_max_mtu()      \
 223        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
 224
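/* Common netdev setup: link the per-CPU private data back to the netdev,
 * install the netdev ops, MTU limits, feature flags and MAC address (falling
 * back to a random one if the MAC has no valid address), then register it.
 */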
 225static int dpaa_netdev_init(struct net_device *net_dev,
 226                            const struct net_device_ops *dpaa_ops,
 227                            u16 tx_timeout)
 228{
 229        struct dpaa_priv *priv = netdev_priv(net_dev);
 230        struct device *dev = net_dev->dev.parent;
 231        struct dpaa_percpu_priv *percpu_priv;
 232        const u8 *mac_addr;
 233        int i, err;
 234
 235        /* Although we access another CPU's private data here,
 236         * we do it at initialization, so it is safe
 237         */
 238        for_each_possible_cpu(i) {
 239                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
 240                percpu_priv->net_dev = net_dev;
 241        }
 242
 243        net_dev->netdev_ops = dpaa_ops;
 244        mac_addr = priv->mac_dev->addr;
 245
 246        net_dev->mem_start = priv->mac_dev->res->start;
 247        net_dev->mem_end = priv->mac_dev->res->end;
 248
 249        net_dev->min_mtu = ETH_MIN_MTU;
 250        net_dev->max_mtu = dpaa_get_max_mtu();
 251
 252        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 253                                 NETIF_F_LLTX | NETIF_F_RXHASH);
 254
 255        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
 256        /* The kernel enables GSO automatically if we declare NETIF_F_SG.
 257         * For conformity, we'll still declare GSO explicitly.
 258         */
 259        net_dev->features |= NETIF_F_GSO;
 260        net_dev->features |= NETIF_F_RXCSUM;
 261
 262        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 263        /* we do not want shared skbs on TX */
 264        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 265
 266        net_dev->features |= net_dev->hw_features;
 267        net_dev->vlan_features = net_dev->features;
 268
 269        if (is_valid_ether_addr(mac_addr)) {
 270                memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
 271                memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
 272        } else {
 273                eth_hw_addr_random(net_dev);
 274                err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
 275                        (enet_addr_t *)net_dev->dev_addr);
 276                if (err) {
 277                        dev_err(dev, "Failed to set random MAC address\n");
 278                        return -EINVAL;
 279                }
 280                dev_info(dev, "Using random MAC address: %pM\n",
 281                         net_dev->dev_addr);
 282        }
 283
 284        net_dev->ethtool_ops = &dpaa_ethtool_ops;
 285
 286        net_dev->needed_headroom = priv->tx_headroom;
 287        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
 288
 289        /* start without the RUNNING flag, phylib controls it later */
 290        netif_carrier_off(net_dev);
 291
 292        err = register_netdev(net_dev);
 293        if (err < 0) {
 294                dev_err(dev, "register_netdev() = %d\n", err);
 295                return err;
 296        }
 297
 298        return 0;
 299}
 300
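/* Bring the interface down: stop the Tx queues, give the FMan Tx port time to
 * drain in-flight frames, then stop the MAC, disable both FMan ports and
 * disconnect the PHY.
 */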
 301static int dpaa_stop(struct net_device *net_dev)
 302{
 303        struct mac_device *mac_dev;
 304        struct dpaa_priv *priv;
 305        int i, err, error;
 306
 307        priv = netdev_priv(net_dev);
 308        mac_dev = priv->mac_dev;
 309
 310        netif_tx_stop_all_queues(net_dev);
 311        /* Allow the Fman (Tx) port to process in-flight frames before we
 312         * try switching it off.
 313         */
 314        msleep(200);
 315
 316        err = mac_dev->stop(mac_dev);
 317        if (err < 0)
 318                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
 319                          err);
 320
 321        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
 322                error = fman_port_disable(mac_dev->port[i]);
 323                if (error)
 324                        err = error;
 325        }
 326
 327        if (net_dev->phydev)
 328                phy_disconnect(net_dev->phydev);
 329        net_dev->phydev = NULL;
 330
 331        msleep(200);
 332
 333        return err;
 334}
 335
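/* Tx timeout handler: log how long the queue has been stalled and account it
 * as a Tx error on the current CPU.
 */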
 336static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
 337{
 338        struct dpaa_percpu_priv *percpu_priv;
 339        const struct dpaa_priv  *priv;
 340
 341        priv = netdev_priv(net_dev);
 342        percpu_priv = this_cpu_ptr(priv->percpu_priv);
 343
 344        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
 345                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
 346
 347        percpu_priv->stats.tx_errors++;
 348}
 349
 350/* Calculates the statistics for the given device by adding the statistics
 351 * collected by each CPU.
 352 */
 353static void dpaa_get_stats64(struct net_device *net_dev,
 354                             struct rtnl_link_stats64 *s)
 355{
 356        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
 357        struct dpaa_priv *priv = netdev_priv(net_dev);
 358        struct dpaa_percpu_priv *percpu_priv;
 359        u64 *netstats = (u64 *)s;
 360        u64 *cpustats;
 361        int i, j;
 362
 363        for_each_possible_cpu(i) {
 364                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
 365
 366                cpustats = (u64 *)&percpu_priv->stats;
 367
 368                /* add stats from all CPUs */
 369                for (j = 0; j < numstats; j++)
 370                        netstats[j] += cpustats[j];
 371        }
 372}
 373
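/* mqprio offload: map the requested number of traffic classes onto groups of
 * DPAA_TC_TXQ_NUM Tx queues, or reset to a single class when num_tc is 0.
 */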
 374static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 375                         void *type_data)
 376{
 377        struct dpaa_priv *priv = netdev_priv(net_dev);
 378        struct tc_mqprio_qopt *mqprio = type_data;
 379        u8 num_tc;
 380        int i;
 381
 382        if (type != TC_SETUP_QDISC_MQPRIO)
 383                return -EOPNOTSUPP;
 384
 385        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 386        num_tc = mqprio->num_tc;
 387
 388        if (num_tc == priv->num_tc)
 389                return 0;
 390
 391        if (!num_tc) {
 392                netdev_reset_tc(net_dev);
 393                goto out;
 394        }
 395
 396        if (num_tc > DPAA_TC_NUM) {
 397                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
 398                           DPAA_TC_NUM);
 399                return -EINVAL;
 400        }
 401
 402        netdev_set_num_tc(net_dev, num_tc);
 403
 404        for (i = 0; i < num_tc; i++)
 405                netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
 406                                    i * DPAA_TC_TXQ_NUM);
 407
 408out:
 409        priv->num_tc = num_tc ? : 1;
 410        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
 411        return 0;
 412}
 413
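/* Fetch the mac_device handle from the dpaa_eth_data platform data attached
 * to this platform device.
 */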
 414static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
 415{
 416        struct dpaa_eth_data *eth_data;
 417        struct device *dpaa_dev;
 418        struct mac_device *mac_dev;
 419
 420        dpaa_dev = &pdev->dev;
 421        eth_data = dpaa_dev->platform_data;
 422        if (!eth_data) {
 423                dev_err(dpaa_dev, "eth_data missing\n");
 424                return ERR_PTR(-ENODEV);
 425        }
 426        mac_dev = eth_data->mac_dev;
 427        if (!mac_dev) {
 428                dev_err(dpaa_dev, "mac_dev missing\n");
 429                return ERR_PTR(-EINVAL);
 430        }
 431
 432        return mac_dev;
 433}
 434
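/* Change the MAC address in both the netdev and the FMan MAC; if the hardware
 * update fails, restore the previous address.
 */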
 435static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
 436{
 437        const struct dpaa_priv *priv;
 438        struct mac_device *mac_dev;
 439        struct sockaddr old_addr;
 440        int err;
 441
 442        priv = netdev_priv(net_dev);
 443
 444        memcpy(old_addr.sa_data, net_dev->dev_addr,  ETH_ALEN);
 445
 446        err = eth_mac_addr(net_dev, addr);
 447        if (err < 0) {
 448                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
 449                return err;
 450        }
 451
 452        mac_dev = priv->mac_dev;
 453
 454        err = mac_dev->change_addr(mac_dev->fman_mac,
 455                                   (enet_addr_t *)net_dev->dev_addr);
 456        if (err < 0) {
 457                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
 458                          err);
 459                /* reverting to previous address */
 460                eth_mac_addr(net_dev, &old_addr);
 461
 462                return err;
 463        }
 464
 465        return 0;
 466}
 467
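/* Propagate the promiscuous and allmulti flags, plus the multicast address
 * list, to the FMan MAC.
 */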
 468static void dpaa_set_rx_mode(struct net_device *net_dev)
 469{
 470        const struct dpaa_priv  *priv;
 471        int err;
 472
 473        priv = netdev_priv(net_dev);
 474
 475        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
 476                priv->mac_dev->promisc = !priv->mac_dev->promisc;
 477                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
 478                                                 priv->mac_dev->promisc);
 479                if (err < 0)
 480                        netif_err(priv, drv, net_dev,
 481                                  "mac_dev->set_promisc() = %d\n",
 482                                  err);
 483        }
 484
 485        if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
 486                priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
 487                err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
 488                                                  priv->mac_dev->allmulti);
 489                if (err < 0)
 490                        netif_err(priv, drv, net_dev,
 491                                  "mac_dev->set_allmulti() = %d\n",
 492                                  err);
 493        }
 494
 495        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
 496        if (err < 0)
 497                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
 498                          err);
 499}
 500
 501static struct dpaa_bp *dpaa_bpid2pool(int bpid)
 502{
 503        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
 504                return NULL;
 505
 506        return dpaa_bp_array[bpid];
 507}
 508
 509/* checks if this bpool is already allocated */
 510static bool dpaa_bpid2pool_use(int bpid)
 511{
 512        if (dpaa_bpid2pool(bpid)) {
 513                refcount_inc(&dpaa_bp_array[bpid]->refs);
 514                return true;
 515        }
 516
 517        return false;
 518}
 519
 520/* called only once per bpid by dpaa_bp_alloc_pool() */
 521static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
 522{
 523        dpaa_bp_array[bpid] = dpaa_bp;
 524        refcount_set(&dpaa_bp->refs, 1);
 525}
 526
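/* Create the Bman pool behind this dpaa_bp (or take a reference if the bpid
 * is already in use), seed it if a seed callback is set, and map bpid to pool.
 */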
 527static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
 528{
 529        int err;
 530
 531        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
 532                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
 533                       __func__);
 534                return -EINVAL;
 535        }
 536
 537        /* If the pool is already specified, we only create one per bpid */
 538        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
 539            dpaa_bpid2pool_use(dpaa_bp->bpid))
 540                return 0;
 541
 542        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
 543                dpaa_bp->pool = bman_new_pool();
 544                if (!dpaa_bp->pool) {
 545                        pr_err("%s: bman_new_pool() failed\n",
 546                               __func__);
 547                        return -ENODEV;
 548                }
 549
 550                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
 551        }
 552
 553        if (dpaa_bp->seed_cb) {
 554                err = dpaa_bp->seed_cb(dpaa_bp);
 555                if (err)
 556                        goto pool_seed_failed;
 557        }
 558
 559        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
 560
 561        return 0;
 562
 563pool_seed_failed:
 564        pr_err("%s: pool seeding failed\n", __func__);
 565        bman_free_pool(dpaa_bp->pool);
 566
 567        return err;
 568}
 569
 570/* remove and free all the buffers from the given buffer pool */
 571static void dpaa_bp_drain(struct dpaa_bp *bp)
 572{
 573        u8 num = 8;
 574        int ret;
 575
 576        do {
 577                struct bm_buffer bmb[8];
 578                int i;
 579
 580                ret = bman_acquire(bp->pool, bmb, num);
 581                if (ret < 0) {
 582                        if (num == 8) {
 583                                /* we have less than 8 buffers left;
 584                                 * drain them one by one
 585                                 */
 586                                num = 1;
 587                                ret = 1;
 588                                continue;
 589                        } else {
 590                                /* Pool is fully drained */
 591                                break;
 592                        }
 593                }
 594
 595                if (bp->free_buf_cb)
 596                        for (i = 0; i < num; i++)
 597                                bp->free_buf_cb(bp, &bmb[i]);
 598        } while (ret > 0);
 599}
 600
 601static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 602{
 603        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
 604
 605        /* the mapping between bpid and dpaa_bp is done very late in the
 606         * allocation procedure; if something failed before the mapping, the bp
 607         * was not configured, therefore we don't need the below instructions
 608         */
 609        if (!bp)
 610                return;
 611
 612        if (!refcount_dec_and_test(&bp->refs))
 613                return;
 614
 615        if (bp->free_buf_cb)
 616                dpaa_bp_drain(bp);
 617
 618        dpaa_bp_array[bp->bpid] = NULL;
 619        bman_free_pool(bp->pool);
 620}
 621
 622static void dpaa_bps_free(struct dpaa_priv *priv)
 623{
 624        dpaa_bp_free(priv->dpaa_bp);
 625}
 626
 627/* Use multiple WQs for FQ assignment:
 628 *      - Tx Confirmation queues go to WQ1.
 629 *      - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 630 *        to be scheduled, in case there are many more FQs in WQ6).
 631 *      - Rx Default goes to WQ6.
 632 *      - Tx queues go to different WQs depending on their priority. Equal
 633 *        chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 634 *        WQ0 (highest priority).
 635 * This ensures that Tx-confirmed buffers are timely released. In particular,
 636 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 637 * are greatly outnumbered by other FQs in the system, while
 638 * dequeue scheduling is round-robin.
 639 */
 640static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 641{
 642        switch (fq->fq_type) {
 643        case FQ_TYPE_TX_CONFIRM:
 644        case FQ_TYPE_TX_CONF_MQ:
 645                fq->wq = 1;
 646                break;
 647        case FQ_TYPE_RX_ERROR:
 648        case FQ_TYPE_TX_ERROR:
 649                fq->wq = 5;
 650                break;
 651        case FQ_TYPE_RX_DEFAULT:
 652        case FQ_TYPE_RX_PCD:
 653                fq->wq = 6;
 654                break;
 655        case FQ_TYPE_TX:
 656                switch (idx / DPAA_TC_TXQ_NUM) {
 657                case 0:
 658                        /* Low priority (best effort) */
 659                        fq->wq = 6;
 660                        break;
 661                case 1:
 662                        /* Medium priority */
 663                        fq->wq = 2;
 664                        break;
 665                case 2:
 666                        /* High priority */
 667                        fq->wq = 1;
 668                        break;
 669                case 3:
 670                        /* Very high priority */
 671                        fq->wq = 0;
 672                        break;
 673                default:
 674                        WARN(1, "Too many TX FQs: more than %d!\n",
 675                             DPAA_ETH_TXQ_NUM);
 676                }
 677                break;
 678        default:
 679                WARN(1, "Invalid FQ type %d for FQID %d!\n",
 680                     fq->fq_type, fq->fqid);
 681        }
 682}
 683
 684static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
 685                                     u32 start, u32 count,
 686                                     struct list_head *list,
 687                                     enum dpaa_fq_type fq_type)
 688{
 689        struct dpaa_fq *dpaa_fq;
 690        int i;
 691
 692        dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
 693                               GFP_KERNEL);
 694        if (!dpaa_fq)
 695                return NULL;
 696
 697        for (i = 0; i < count; i++) {
 698                dpaa_fq[i].fq_type = fq_type;
 699                dpaa_fq[i].fqid = start ? start + i : 0;
 700                list_add_tail(&dpaa_fq[i].list, list);
 701        }
 702
 703        for (i = 0; i < count; i++)
 704                dpaa_assign_wq(dpaa_fq + i, i);
 705
 706        return dpaa_fq;
 707}
 708
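/* Allocate all the frame queues one interface needs: Rx default/error/PCD,
 * Tx, Tx confirmation and Tx error queues, recording the per-port default and
 * error FQs in port_fqs.
 */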
 709static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 710                              struct fm_port_fqs *port_fqs)
 711{
 712        struct dpaa_fq *dpaa_fq;
 713        u32 fq_base, fq_base_aligned, i;
 714
 715        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
 716        if (!dpaa_fq)
 717                goto fq_alloc_failed;
 718
 719        port_fqs->rx_errq = &dpaa_fq[0];
 720
 721        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
 722        if (!dpaa_fq)
 723                goto fq_alloc_failed;
 724
 725        port_fqs->rx_defq = &dpaa_fq[0];
 726
 727        /* the PCD FQIDs range needs to be aligned for correct operation */
 728        if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
 729                goto fq_alloc_failed;
 730
 731        fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
 732
 733        for (i = fq_base; i < fq_base_aligned; i++)
 734                qman_release_fqid(i);
 735
 736        for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
 737             i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
 738                qman_release_fqid(i);
 739
 740        dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
 741                                list, FQ_TYPE_RX_PCD);
 742        if (!dpaa_fq)
 743                goto fq_alloc_failed;
 744
 745        port_fqs->rx_pcdq = &dpaa_fq[0];
 746
 747        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
 748                goto fq_alloc_failed;
 749
 750        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
 751        if (!dpaa_fq)
 752                goto fq_alloc_failed;
 753
 754        port_fqs->tx_errq = &dpaa_fq[0];
 755
 756        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
 757        if (!dpaa_fq)
 758                goto fq_alloc_failed;
 759
 760        port_fqs->tx_defq = &dpaa_fq[0];
 761
 762        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
 763                goto fq_alloc_failed;
 764
 765        return 0;
 766
 767fq_alloc_failed:
 768        dev_err(dev, "dpaa_fq_alloc() failed\n");
 769        return -ENOMEM;
 770}
 771
 772static u32 rx_pool_channel;
 773static DEFINE_SPINLOCK(rx_pool_channel_init);
 774
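/* Lazily allocate the QMan pool channel shared by the Rx queues of all
 * interfaces handled by this driver; later callers get the same channel.
 */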
 775static int dpaa_get_channel(void)
 776{
 777        spin_lock(&rx_pool_channel_init);
 778        if (!rx_pool_channel) {
 779                u32 pool;
 780                int ret;
 781
 782                ret = qman_alloc_pool(&pool);
 783
 784                if (!ret)
 785                        rx_pool_channel = pool;
 786        }
 787        spin_unlock(&rx_pool_channel_init);
 788        if (!rx_pool_channel)
 789                return -ENOMEM;
 790        return rx_pool_channel;
 791}
 792
 793static void dpaa_release_channel(void)
 794{
 795        qman_release_pool(rx_pool_channel);
 796}
 797
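/* Have every online CPU's affine portal statically dequeue from the given
 * pool channel and mark each portal as in use by this device.
 */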
 798static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 799{
 800        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
 801        const cpumask_t *cpus = qman_affine_cpus();
 802        struct qman_portal *portal;
 803        int cpu;
 804
 805        for_each_cpu_and(cpu, cpus, cpu_online_mask) {
 806                portal = qman_get_affine_portal(cpu);
 807                qman_p_static_dequeue_add(portal, pool);
 808                qman_start_using_portal(portal, dev);
 809        }
 810}
 811
 812/* Congestion group state change notification callback.
 813 * Stops the device's egress queues while they are congested and
 814 * wakes them upon exiting congested state.
 815 * Also updates some CGR-related stats.
 816 */
 817static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
 818                           int congested)
 819{
 820        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
 821                struct dpaa_priv, cgr_data.cgr);
 822
 823        if (congested) {
 824                priv->cgr_data.congestion_start_jiffies = jiffies;
 825                netif_tx_stop_all_queues(priv->net_dev);
 826                priv->cgr_data.cgr_congested_count++;
 827        } else {
 828                priv->cgr_data.congested_jiffies +=
 829                        (jiffies - priv->cgr_data.congestion_start_jiffies);
 830                netif_tx_wake_all_queues(priv->net_dev);
 831        }
 832}
 833
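/* Allocate and create the egress Congestion Group Record: enable congestion
 * state change notifications (dpaa_eth_cgscn) and CS tail drop, with a
 * threshold based on the MAC's maximum supported speed.
 */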
 834static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
 835{
 836        struct qm_mcc_initcgr initcgr;
 837        u32 cs_th;
 838        int err;
 839
 840        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
 841        if (err < 0) {
 842                if (netif_msg_drv(priv))
 843                        pr_err("%s: Error %d allocating CGR ID\n",
 844                               __func__, err);
 845                goto out_error;
 846        }
 847        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
 848
 849        /* Enable Congestion State Change Notifications and CS taildrop */
 850        memset(&initcgr, 0, sizeof(initcgr));
 851        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 852        initcgr.cgr.cscn_en = QM_CGR_EN;
 853
 854        /* Set different thresholds based on the MAC speed.
 855         * This may turn suboptimal if the MAC is reconfigured at a speed
 856         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
 857         * In such cases, we ought to reconfigure the threshold, too.
 858         */
 859        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
 860                cs_th = DPAA_CS_THRESHOLD_10G;
 861        else
 862                cs_th = DPAA_CS_THRESHOLD_1G;
 863        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
 864
 865        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
 866        initcgr.cgr.cstd_en = QM_CGR_EN;
 867
 868        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
 869                              &initcgr);
 870        if (err < 0) {
 871                if (netif_msg_drv(priv))
 872                        pr_err("%s: Error %d creating CGR with ID %d\n",
 873                               __func__, err, priv->cgr_data.cgr.cgrid);
 874                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
 875                goto out_error;
 876        }
 877        if (netif_msg_drv(priv))
 878                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
 879                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
 880                         priv->cgr_data.cgr.chan);
 881
 882out_error:
 883        return err;
 884}
 885
 886static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
 887                                      struct dpaa_fq *fq,
 888                                      const struct qman_fq *template)
 889{
 890        fq->fq_base = *template;
 891        fq->net_dev = priv->net_dev;
 892
 893        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
 894        fq->channel = priv->channel;
 895}
 896
 897static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
 898                                     struct dpaa_fq *fq,
 899                                     struct fman_port *port,
 900                                     const struct qman_fq *template)
 901{
 902        fq->fq_base = *template;
 903        fq->net_dev = priv->net_dev;
 904
 905        if (port) {
 906                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
 907                fq->channel = (u16)fman_port_get_qman_channel_id(port);
 908        } else {
 909                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
 910        }
 911}
 912
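/* Walk the interface's FQ list, hooking up the right callbacks and channels
 * per FQ type and spreading the PCD Rx queues across the affine portals.
 */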
 913static void dpaa_fq_setup(struct dpaa_priv *priv,
 914                          const struct dpaa_fq_cbs *fq_cbs,
 915                          struct fman_port *tx_port)
 916{
 917        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
 918        const cpumask_t *affine_cpus = qman_affine_cpus();
 919        u16 channels[NR_CPUS];
 920        struct dpaa_fq *fq;
 921
 922        for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 923                channels[num_portals++] = qman_affine_channel(cpu);
 924
 925        if (num_portals == 0)
 926                dev_err(priv->net_dev->dev.parent,
 927                        "No Qman software (affine) channels found\n");
 928
 929        /* Initialize each FQ in the list */
 930        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
 931                switch (fq->fq_type) {
 932                case FQ_TYPE_RX_DEFAULT:
 933                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
 934                        break;
 935                case FQ_TYPE_RX_ERROR:
 936                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
 937                        break;
 938                case FQ_TYPE_RX_PCD:
 939                        if (!num_portals)
 940                                continue;
 941                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
 942                        fq->channel = channels[portal_cnt++ % num_portals];
 943                        break;
 944                case FQ_TYPE_TX:
 945                        dpaa_setup_egress(priv, fq, tx_port,
 946                                          &fq_cbs->egress_ern);
 947                        /* If we have more Tx queues than the number of cores,
 948                         * just ignore the extra ones.
 949                         */
 950                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
 951                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
 952                        break;
 953                case FQ_TYPE_TX_CONF_MQ:
 954                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
 955                        fallthrough;
 956                case FQ_TYPE_TX_CONFIRM:
 957                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
 958                        break;
 959                case FQ_TYPE_TX_ERROR:
 960                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
 961                        break;
 962                default:
 963                        dev_warn(priv->net_dev->dev.parent,
 964                                 "Unknown FQ type detected!\n");
 965                        break;
 966                }
 967        }
 968
 969         /* Make sure all CPUs receive a corresponding Tx queue. */
 970        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
 971                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
 972                        if (fq->fq_type != FQ_TYPE_TX)
 973                                continue;
 974                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
 975                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
 976                                break;
 977                }
 978        }
 979}
 980
 981static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
 982                                   struct qman_fq *tx_fq)
 983{
 984        int i;
 985
 986        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
 987                if (priv->egress_fqs[i] == tx_fq)
 988                        return i;
 989
 990        return -EINVAL;
 991}
 992
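/* Create one frame queue in QMan and, unless it is a NO_MODIFY FQ, initialize
 * it: work queue placement, congestion group, optional taildrop, stashing for
 * ingress queues, and XDP rxq registration for the Rx default/PCD queues.
 */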
 993static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
 994{
 995        const struct dpaa_priv  *priv;
 996        struct qman_fq *confq = NULL;
 997        struct qm_mcc_initfq initfq;
 998        struct device *dev;
 999        struct qman_fq *fq;
1000        int queue_id;
1001        int err;
1002
1003        priv = netdev_priv(dpaa_fq->net_dev);
1004        dev = dpaa_fq->net_dev->dev.parent;
1005
1006        if (dpaa_fq->fqid == 0)
1007                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1008
1009        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
1010
1011        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
1012        if (err) {
1013                dev_err(dev, "qman_create_fq() failed\n");
1014                return err;
1015        }
1016        fq = &dpaa_fq->fq_base;
1017
1018        if (dpaa_fq->init) {
1019                memset(&initfq, 0, sizeof(initfq));
1020
1021                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
1022                /* Note: we may get to keep an empty FQ in cache */
1023                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1024
1025                /* Try to reduce the number of portal interrupts for
1026                 * Tx Confirmation FQs.
1027                 */
1028                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1029                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1030
1031                /* FQ placement */
1032                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1033
1034                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1035
1036                /* Put all egress queues in a congestion group of their own.
1037                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1038                 * rather than Tx - but they nonetheless account for the
1039                 * memory footprint on behalf of egress traffic. We therefore
1040                 * place them in the netdev's CGR, along with the Tx FQs.
1041                 */
1042                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1043                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1044                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1045                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1046                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1047                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1048                        /* Set a fixed overhead accounting, in an attempt to
1049                         * reduce the impact of fixed-size skb shells and the
1050                         * driver's needed headroom on system memory. This is
1051                         * especially the case when the egress traffic is
1052                         * composed of small datagrams.
1053                         * Unfortunately, QMan's OAL value is capped to an
1054                         * insufficient value, but even that is better than
1055                         * no overhead accounting at all.
1056                         */
1057                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1058                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1059                        qm_fqd_set_oal(&initfq.fqd,
1060                                       min(sizeof(struct sk_buff) +
1061                                       priv->tx_headroom,
1062                                       (size_t)FSL_QMAN_MAX_OAL));
1063                }
1064
1065                if (td_enable) {
1066                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1067                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1068                        initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1069                }
1070
1071                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1072                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1073                        if (queue_id >= 0)
1074                                confq = priv->conf_fqs[queue_id];
1075                        if (confq) {
1076                                initfq.we_mask |=
1077                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1078                        /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1079                         *           A2V=1 (contextA A2 field is valid)
1080                         *           A0V=1 (contextA A0 field is valid)
1081                         *           B0V=1 (contextB field is valid)
1082                         * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1083                         * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1084                         */
1085                                qm_fqd_context_a_set64(&initfq.fqd,
1086                                                       0x1e00000080000000ULL);
1087                        }
1088                }
1089
1090                /* Put all the ingress queues in our "ingress CGR". */
1091                if (priv->use_ingress_cgr &&
1092                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1093                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1094                     dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1095                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1096                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1097                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1098                        /* Set a fixed overhead accounting, just like for the
1099                         * egress CGR.
1100                         */
1101                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1102                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1103                        qm_fqd_set_oal(&initfq.fqd,
1104                                       min(sizeof(struct sk_buff) +
1105                                       priv->tx_headroom,
1106                                       (size_t)FSL_QMAN_MAX_OAL));
1107                }
1108
1109                /* Initialization common to all ingress queues */
1110                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1111                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1112                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1113                                                QM_FQCTRL_CTXASTASHING);
1114                        initfq.fqd.context_a.stashing.exclusive =
1115                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1116                                QM_STASHING_EXCL_ANNOTATION;
1117                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1118                                            DIV_ROUND_UP(sizeof(struct qman_fq),
1119                                                         64));
1120                }
1121
1122                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1123                if (err < 0) {
1124                        dev_err(dev, "qman_init_fq(%u) = %d\n",
1125                                qman_fq_fqid(fq), err);
1126                        qman_destroy_fq(fq);
1127                        return err;
1128                }
1129        }
1130
1131        dpaa_fq->fqid = qman_fq_fqid(fq);
1132
1133        if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1134            dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
1135                err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
1136                                       dpaa_fq->fqid, 0);
1137                if (err) {
1138                        dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
1139                        return err;
1140                }
1141
1142                err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
1143                                                 MEM_TYPE_PAGE_ORDER0, NULL);
1144                if (err) {
1145                        dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
1146                                err);
1147                        xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1148                        return err;
1149                }
1150        }
1151
1152        return 0;
1153}
1154
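/* Tear down a single FQ: retire it and take it out of service in QMan,
 * unregister its XDP rxq info if registered, then destroy it and drop it from
 * the list.
 */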
1155static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1156{
1157        const struct dpaa_priv  *priv;
1158        struct dpaa_fq *dpaa_fq;
1159        int err, error;
1160
1161        err = 0;
1162
1163        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1164        priv = netdev_priv(dpaa_fq->net_dev);
1165
1166        if (dpaa_fq->init) {
1167                err = qman_retire_fq(fq, NULL);
1168                if (err < 0 && netif_msg_drv(priv))
1169                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
1170                                qman_fq_fqid(fq), err);
1171
1172                error = qman_oos_fq(fq);
1173                if (error < 0 && netif_msg_drv(priv)) {
1174                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
1175                                qman_fq_fqid(fq), error);
1176                        if (err >= 0)
1177                                err = error;
1178                }
1179        }
1180
1181        if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1182             dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
1183            xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
1184                xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1185
1186        qman_destroy_fq(fq);
1187        list_del(&dpaa_fq->list);
1188
1189        return err;
1190}
1191
1192static int dpaa_fq_free(struct device *dev, struct list_head *list)
1193{
1194        struct dpaa_fq *dpaa_fq, *tmp;
1195        int err, error;
1196
1197        err = 0;
1198        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1199                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1200                if (error < 0 && err >= 0)
1201                        err = error;
1202        }
1203
1204        return err;
1205}
1206
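/* Configure and initialize the FMan Tx port: default and error FQIDs plus the
 * buffer prefix layout (private data, parse/hash results, timestamp).
 */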
1207static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1208                                 struct dpaa_fq *defq,
1209                                 struct dpaa_buffer_layout *buf_layout)
1210{
1211        struct fman_buffer_prefix_content buf_prefix_content;
1212        struct fman_port_params params;
1213        int err;
1214
1215        memset(&params, 0, sizeof(params));
1216        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1217
1218        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1219        buf_prefix_content.pass_prs_result = true;
1220        buf_prefix_content.pass_hash_result = true;
1221        buf_prefix_content.pass_time_stamp = true;
1222        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1223
1224        params.specific_params.non_rx_params.err_fqid = errq->fqid;
1225        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1226
1227        err = fman_port_config(port, &params);
1228        if (err) {
1229                pr_err("%s: fman_port_config failed\n", __func__);
1230                return err;
1231        }
1232
1233        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1234        if (err) {
1235                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1236                       __func__);
1237                return err;
1238        }
1239
1240        err = fman_port_init(port);
1241        if (err)
1242                pr_err("%s: fm_port_init failed\n", __func__);
1243
1244        return err;
1245}
1246
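/* Configure and initialize the FMan Rx port: backing buffer pool, default,
 * error and PCD FQIDs, and the Rx buffer prefix layout.
 */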
1247static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
1248                                 struct dpaa_fq *errq,
1249                                 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1250                                 struct dpaa_buffer_layout *buf_layout)
1251{
1252        struct fman_buffer_prefix_content buf_prefix_content;
1253        struct fman_port_rx_params *rx_p;
1254        struct fman_port_params params;
1255        int err;
1256
1257        memset(&params, 0, sizeof(params));
1258        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1259
1260        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1261        buf_prefix_content.pass_prs_result = true;
1262        buf_prefix_content.pass_hash_result = true;
1263        buf_prefix_content.pass_time_stamp = true;
1264        buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
1265
1266        rx_p = &params.specific_params.rx_params;
1267        rx_p->err_fqid = errq->fqid;
1268        rx_p->dflt_fqid = defq->fqid;
1269        if (pcdq) {
1270                rx_p->pcd_base_fqid = pcdq->fqid;
1271                rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1272        }
1273
1274        rx_p->ext_buf_pools.num_of_pools_used = 1;
1275        rx_p->ext_buf_pools.ext_buf_pool[0].id =  bp->bpid;
1276        rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
1277
1278        err = fman_port_config(port, &params);
1279        if (err) {
1280                pr_err("%s: fman_port_config failed\n", __func__);
1281                return err;
1282        }
1283
1284        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1285        if (err) {
1286                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1287                       __func__);
1288                return err;
1289        }
1290
1291        err = fman_port_init(port);
1292        if (err)
1293                pr_err("%s: fm_port_init failed\n", __func__);
1294
1295        return err;
1296}
1297
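/* Initialize both FMan ports (Tx, then Rx) with their FQs and buffer layouts.
 */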
1298static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1299                               struct dpaa_bp *bp,
1300                               struct fm_port_fqs *port_fqs,
1301                               struct dpaa_buffer_layout *buf_layout,
1302                               struct device *dev)
1303{
1304        struct fman_port *rxport = mac_dev->port[RX];
1305        struct fman_port *txport = mac_dev->port[TX];
1306        int err;
1307
1308        err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1309                                    port_fqs->tx_defq, &buf_layout[TX]);
1310        if (err)
1311                return err;
1312
1313        err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
1314                                    port_fqs->rx_defq, port_fqs->rx_pcdq,
1315                                    &buf_layout[RX]);
1316
1317        return err;
1318}
1319
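/* Release buffers back to Bman; if the release unexpectedly fails, free the
 * buffers through free_buf_cb instead so they are not leaked.
 */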
1320static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1321                             struct bm_buffer *bmb, int cnt)
1322{
1323        int err;
1324
1325        err = bman_release(dpaa_bp->pool, bmb, cnt);
1326        /* Should never occur, address anyway to avoid leaking the buffers */
1327        if (WARN_ON(err) && dpaa_bp->free_buf_cb)
1328                while (cnt-- > 0)
1329                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1330
1331        return cnt;
1332}
1333
1334static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1335{
1336        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1337        struct dpaa_bp *dpaa_bp;
1338        int i = 0, j;
1339
1340        memset(bmb, 0, sizeof(bmb));
1341
1342        do {
1343                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1344                if (!dpaa_bp)
1345                        return;
1346
1347                j = 0;
1348                do {
1349                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1350
1351                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1352
1353                        j++; i++;
1354                } while (j < ARRAY_SIZE(bmb) &&
1355                                !qm_sg_entry_is_final(&sgt[i - 1]) &&
1356                                sgt[i - 1].bpid == sgt[i].bpid);
1357
1358                dpaa_bman_release(dpaa_bp, bmb, j);
1359        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1360}
1361
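/* Return a frame descriptor's buffer to its Bman pool. For S/G frames, the
 * SGT buffer is unmapped so its entries can be walked and released first,
 * then mapped again before being released itself.
 */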
1362static void dpaa_fd_release(const struct net_device *net_dev,
1363                            const struct qm_fd *fd)
1364{
1365        struct qm_sg_entry *sgt;
1366        struct dpaa_bp *dpaa_bp;
1367        struct bm_buffer bmb;
1368        dma_addr_t addr;
1369        void *vaddr;
1370
1371        bmb.data = 0;
1372        bm_buffer_set64(&bmb, qm_fd_addr(fd));
1373
1374        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1375        if (!dpaa_bp)
1376                return;
1377
1378        if (qm_fd_get_format(fd) == qm_fd_sg) {
1379                vaddr = phys_to_virt(qm_fd_addr(fd));
1380                sgt = vaddr + qm_fd_get_offset(fd);
1381
1382                dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
1383                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1384
1385                dpaa_release_sgt_members(sgt);
1386
1387                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
1388                                    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
1389                                    DMA_FROM_DEVICE);
1390                if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
1391                        netdev_err(net_dev, "DMA mapping failed\n");
1392                        return;
1393                }
1394                bm_buffer_set64(&bmb, addr);
1395        }
1396
1397        dpaa_bman_release(dpaa_bp, &bmb, 1);
1398}
1399
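/* Classify an enqueue rejection (ERN) message by its reason code and bump the
 * matching per-CPU counter.
 */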
1400static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1401                      const union qm_mr_entry *msg)
1402{
1403        switch (msg->ern.rc & QM_MR_RC_MASK) {
1404        case QM_MR_RC_CGR_TAILDROP:
1405                percpu_priv->ern_cnt.cg_tdrop++;
1406                break;
1407        case QM_MR_RC_WRED:
1408                percpu_priv->ern_cnt.wred++;
1409                break;
1410        case QM_MR_RC_ERROR:
1411                percpu_priv->ern_cnt.err_cond++;
1412                break;
1413        case QM_MR_RC_ORPWINDOW_EARLY:
1414                percpu_priv->ern_cnt.early_window++;
1415                break;
1416        case QM_MR_RC_ORPWINDOW_LATE:
1417                percpu_priv->ern_cnt.late_window++;
1418                break;
1419        case QM_MR_RC_FQ_TAILDROP:
1420                percpu_priv->ern_cnt.fq_tdrop++;
1421                break;
1422        case QM_MR_RC_ORPWINDOW_RETIRED:
1423                percpu_priv->ern_cnt.fq_retired++;
1424                break;
1425        case QM_MR_RC_ORP_ZERO:
1426                percpu_priv->ern_cnt.orp_zero++;
1427                break;
1428        }
1429}
1430
1431/* Turn on HW checksum computation for this outgoing frame.
1432 * If the current protocol is not something we support in this regard
1433 * (or if the stack has already computed the SW checksum), we do nothing.
1434 *
1435 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1436 * otherwise.
1437 *
1438 * Note that this function may modify the fd->cmd field and the skb data buffer
1439 * (the Parse Results area).
1440 */
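    /* In short: for CHECKSUM_PARTIAL skbs, the relevant L3/L4 fields of the
     * Parse Results area are pre-filled from the skb headers, and the
     * FM_FD_CMD_RPD | FM_FD_CMD_DTC bits are set in fd->cmd so that the FMan
     * computes and inserts the checksum in hardware.
     */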
1441static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1442                               struct sk_buff *skb,
1443                               struct qm_fd *fd,
1444                               void *parse_results)
1445{
1446        struct fman_prs_result *parse_result;
1447        u16 ethertype = ntohs(skb->protocol);
1448        struct ipv6hdr *ipv6h = NULL;
1449        struct iphdr *iph;
1450        int retval = 0;
1451        u8 l4_proto;
1452
1453        if (skb->ip_summed != CHECKSUM_PARTIAL)
1454                return 0;
1455
1456        /* Note: the L3 csum seems to already be computed in software, but we
1457         * can't select L4-only checksum offload in the FMan configuration anyway.
1458         */
1459
1460        /* Fill in some fields of the Parse Results array, so the FMan
1461         * can find them as if they came from the FMan Parser.
1462         */
1463        parse_result = (struct fman_prs_result *)parse_results;
1464
1465        /* If we're dealing with VLAN, get the real Ethernet type */
1466        if (ethertype == ETH_P_8021Q) {
1467                /* We can't always assume the MAC header is set correctly
1468                 * by the stack, so reset to beginning of skb->data
1469                 */
1470                skb_reset_mac_header(skb);
1471                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1472        }
1473
1474        /* Fill in the relevant L3 parse result fields
1475         * and read the L4 protocol type
1476         */
1477        switch (ethertype) {
1478        case ETH_P_IP:
1479                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1480                iph = ip_hdr(skb);
1481                WARN_ON(!iph);
1482                l4_proto = iph->protocol;
1483                break;
1484        case ETH_P_IPV6:
1485                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1486                ipv6h = ipv6_hdr(skb);
1487                WARN_ON(!ipv6h);
1488                l4_proto = ipv6h->nexthdr;
1489                break;
1490        default:
1491                /* We shouldn't even be here */
1492                if (net_ratelimit())
1493                        netif_alert(priv, tx_err, priv->net_dev,
1494                                    "Can't compute HW csum for L3 proto 0x%x\n",
1495                                    ntohs(skb->protocol));
1496                retval = -EIO;
1497                goto return_error;
1498        }
1499
1500        /* Fill in the relevant L4 parse result fields */
1501        switch (l4_proto) {
1502        case IPPROTO_UDP:
1503                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1504                break;
1505        case IPPROTO_TCP:
1506                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1507                break;
1508        default:
1509                if (net_ratelimit())
1510                        netif_alert(priv, tx_err, priv->net_dev,
1511                                    "Can't compute HW csum for L4 proto 0x%x\n",
1512                                    l4_proto);
1513                retval = -EIO;
1514                goto return_error;
1515        }
1516
1517        /* At index 0 is IPOffset_1 as defined in the Parse Results */
1518        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1519        parse_result->l4_off = (u8)skb_transport_offset(skb);
1520
1521        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1522        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1523
1524        /* On P1023 and similar platforms, fd->cmd interpretation could be
1525         * disabled by setting the ICMD bit in CONTEXT_A. That bit is currently
1526         * not set, so no check is needed here; if/when context_a is used in
1527         * the future, this bit will have to be checked.
1528         */
1529
1530return_error:
1531        return retval;
1532}
1533
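    /* Allocate 8 order-0 pages, DMA-map them for Rx and seed them into the
     * buffer pool. Returns the number of buffers actually added (normally 8);
     * on partial failure, whatever was successfully mapped so far is still
     * released to the pool.
     */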
1534static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1535{
1536        struct net_device *net_dev = dpaa_bp->priv->net_dev;
1537        struct bm_buffer bmb[8];
1538        dma_addr_t addr;
1539        struct page *p;
1540        u8 i;
1541
1542        for (i = 0; i < 8; i++) {
1543                p = dev_alloc_pages(0);
1544                if (unlikely(!p)) {
1545                        netdev_err(net_dev, "dev_alloc_pages() failed\n");
1546                        goto release_previous_buffs;
1547                }
1548
1549                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
1550                                    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1551                if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
1552                                               addr))) {
1553                        netdev_err(net_dev, "DMA map failed\n");
1554                        goto release_previous_buffs;
1555                }
1556
1557                bmb[i].data = 0;
1558                bm_buffer_set64(&bmb[i], addr);
1559        }
1560
1561release_bufs:
1562        return dpaa_bman_release(dpaa_bp, bmb, i);
1563
1564release_previous_buffs:
1565        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1566
1567        bm_buffer_set64(&bmb[i], 0);
1568        /* Avoid releasing a completely null buffer; bman_release() requires
1569         * at least one buffer.
1570         */
1571        if (likely(i))
1572                goto release_bufs;
1573
1574        return 0;
1575}
1576
1577static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1578{
1579        int i;
1580
1581        /* Give each CPU an allotment of "config_count" buffers */
1582        for_each_possible_cpu(i) {
1583                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1584                int j;
1585
1586                /* Although we access another CPU's counters here,
1587                 * we do it at boot time, so it is safe.
1588                 */
1589                for (j = 0; j < dpaa_bp->config_count; j += 8)
1590                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1591        }
1592        return 0;
1593}
1594
1595/* Add buffers (pages) for Rx processing whenever the buffer pool count falls
1596 * below FSL_DPAA_ETH_REFILL_THRESHOLD.
1597 */
1598static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1599{
1600        int count = *countptr;
1601        int new_bufs;
1602
1603        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1604                do {
1605                        new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1606                        if (unlikely(!new_bufs)) {
1607                                /* Avoid looping forever if we've temporarily
1608                                 * run out of memory. We'll try again at the
1609                                 * next NAPI cycle.
1610                                 */
1611                                break;
1612                        }
1613                        count += new_bufs;
1614                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1615
1616                *countptr = count;
1617                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1618                        return -ENOMEM;
1619        }
1620
1621        return 0;
1622}
1623
1624static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1625{
1626        struct dpaa_bp *dpaa_bp;
1627        int *countptr;
1628
1629        dpaa_bp = priv->dpaa_bp;
1630        if (!dpaa_bp)
1631                return -EINVAL;
1632        countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1633
1634        return dpaa_eth_refill_bpool(dpaa_bp, countptr);
1635}
1636
1637/* Cleanup function for outgoing frame descriptors that were built on the Tx
1638 * path, either contiguous frames or scatter/gather ones.
1639 * Freeing the skb is not handled here.
1640 *
1641 * This function may be called on error paths in the Tx function, so guard
1642 * against cases where not all relevant fd fields were filled in. On those
1643 * error paths, ts is set to false to avoid reading an invalid transmission
1644 * timestamp.
1645 *
1646 * Return the skb backpointer, since for S/G frames the buffer containing it
1647 * gets freed here.
1648 *
1649 * No skb backpointer is set when transmitting XDP frames. Clean up the buffer
1650 * and return NULL in this case.
1651 */
1652static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1653                                          const struct qm_fd *fd, bool ts)
1654{
1655        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1656        struct device *dev = priv->net_dev->dev.parent;
1657        struct skb_shared_hwtstamps shhwtstamps;
1658        dma_addr_t addr = qm_fd_addr(fd);
1659        void *vaddr = phys_to_virt(addr);
1660        const struct qm_sg_entry *sgt;
1661        struct dpaa_eth_swbp *swbp;
1662        struct sk_buff *skb;
1663        u64 ns;
1664        int i;
1665
1666        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1667                dma_unmap_page(priv->tx_dma_dev, addr,
1668                               qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1669                               dma_dir);
1670
1671                /* The SGT buffer was allocated with dev_alloc_pages()
1672                 * on the Tx path, so it's from lowmem.
1673                 */
1674                sgt = vaddr + qm_fd_get_offset(fd);
1675
1676                /* sgt[0] is from lowmem, was dma_map_single()-ed */
1677                dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
1678                                 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1679
1680                /* remaining pages were mapped with skb_frag_dma_map() */
1681                for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
1682                     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
1683                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1684
1685                        dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
1686                                       qm_sg_entry_get_len(&sgt[i]), dma_dir);
1687                }
1688        } else {
1689                dma_unmap_single(priv->tx_dma_dev, addr,
1690                                 qm_fd_get_offset(fd) + qm_fd_get_length(fd),
1691                                 dma_dir);
1692        }
1693
1694        swbp = (struct dpaa_eth_swbp *)vaddr;
1695        skb = swbp->skb;
1696
1697        /* No skb backpointer is set when running XDP. An xdp_frame
1698         * backpointer is saved instead.
1699         */
1700        if (!skb) {
1701                xdp_return_frame(swbp->xdpf);
1702                return NULL;
1703        }
1704
1705        /* DMA unmapping is required before accessing the HW provided info */
1706        if (ts && priv->tx_tstamp &&
1707            skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1708                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1709
1710                if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
1711                                          &ns)) {
1712                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
1713                        skb_tstamp_tx(skb, &shhwtstamps);
1714                } else {
1715                        dev_warn(dev, "fman_port_get_tstamp failed!\n");
1716                }
1717        }
1718
1719        if (qm_fd_get_format(fd) == qm_fd_sg)
1720                /* Free the page that we allocated on Tx for the SGT */
1721                free_pages((unsigned long)vaddr, 0);
1722
1723        return skb;
1724}
1725
1726static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1727{
1728        /* The parser has run and performed L4 checksum validation.
1729         * We know there were no parser errors (and implicitly no
1730         * L4 csum error), otherwise we wouldn't be here.
1731         */
1732        if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1733            (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1734                return CHECKSUM_UNNECESSARY;
1735
1736        /* We're here because either the parser didn't run or the L4 checksum
1737         * was not verified. This may include the case of a UDP frame with
1738         * checksum zero or an L4 proto other than TCP/UDP.
1739         */
1740        return CHECKSUM_NONE;
1741}
1742
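    /* Pointer variant of IS_ALIGNED(); the alignment must be a power of two. */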
1743#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
1744
1745/* Build a linear skb around the received buffer.
1746 * We are guaranteed there is enough room at the end of the data buffer to
1747 * accommodate the shared info area of the skb.
1748 */
1749static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1750                                        const struct qm_fd *fd)
1751{
1752        ssize_t fd_off = qm_fd_get_offset(fd);
1753        dma_addr_t addr = qm_fd_addr(fd);
1754        struct dpaa_bp *dpaa_bp;
1755        struct sk_buff *skb;
1756        void *vaddr;
1757
1758        vaddr = phys_to_virt(addr);
1759        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1760
1761        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1762        if (!dpaa_bp)
1763                goto free_buffer;
1764
1765        skb = build_skb(vaddr, dpaa_bp->size +
1766                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1767        if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
1768                goto free_buffer;
1769        skb_reserve(skb, fd_off);
1770        skb_put(skb, qm_fd_get_length(fd));
1771
1772        skb->ip_summed = rx_csum_offload(priv, fd);
1773
1774        return skb;
1775
1776free_buffer:
1777        free_pages((unsigned long)vaddr, 0);
1778        return NULL;
1779}
1780
1781/* Build an skb with the data of the first S/G entry in the linear portion and
1782 * the rest of the frame as skb fragments.
1783 *
1784 * The page fragment holding the S/G Table is recycled here.
1785 */
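    /* The first S/G entry becomes the linear part of the skb (via build_skb()),
     * while the remaining entries are attached as page fragments with
     * skb_add_rx_frag().
     */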
1786static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1787                                    const struct qm_fd *fd)
1788{
1789        ssize_t fd_off = qm_fd_get_offset(fd);
1790        dma_addr_t addr = qm_fd_addr(fd);
1791        const struct qm_sg_entry *sgt;
1792        struct page *page, *head_page;
1793        struct dpaa_bp *dpaa_bp;
1794        void *vaddr, *sg_vaddr;
1795        int frag_off, frag_len;
1796        struct sk_buff *skb;
1797        dma_addr_t sg_addr;
1798        int page_offset;
1799        unsigned int sz;
1800        int *count_ptr;
1801        int i, j;
1802
1803        vaddr = phys_to_virt(addr);
1804        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1805
1806        /* Iterate through the SGT entries and add data buffers to the skb */
1807        sgt = vaddr + fd_off;
1808        skb = NULL;
1809        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1810                /* Extension bit is not supported */
1811                WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1812
1813                sg_addr = qm_sg_addr(&sgt[i]);
1814                sg_vaddr = phys_to_virt(sg_addr);
1815                WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
1816
1817                dma_unmap_page(priv->rx_dma_dev, sg_addr,
1818                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1819
1820                /* We may use multiple Rx pools */
1821                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1822                if (!dpaa_bp)
1823                        goto free_buffers;
1824
1825                if (!skb) {
1826                        sz = dpaa_bp->size +
1827                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1828                        skb = build_skb(sg_vaddr, sz);
1829                        if (WARN_ON(!skb))
1830                                goto free_buffers;
1831
1832                        skb->ip_summed = rx_csum_offload(priv, fd);
1833
1834                        /* Make sure forwarded skbs will have enough space
1835                         * on Tx, if extra headers are added.
1836                         */
1837                        WARN_ON(fd_off != priv->rx_headroom);
1838                        skb_reserve(skb, fd_off);
1839                        skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1840                } else {
1841                        /* Not the first S/G entry; all data from this buffer
1842                         * will be added as an skb fragment. The fragment index
1843                         * is offset by one, since the first S/G entry was
1844                         * incorporated into the linear part of the skb.
1845                         *
1846                         * Caution: 'page' may be a tail page.
1847                         */
1848                        page = virt_to_page(sg_vaddr);
1849                        head_page = virt_to_head_page(sg_vaddr);
1850
1851                        /* Compute offset in (possibly tail) page */
1852                        page_offset = ((unsigned long)sg_vaddr &
1853                                        (PAGE_SIZE - 1)) +
1854                                (page_address(page) - page_address(head_page));
1855                        /* page_offset only refers to the beginning of sgt[i],
1856                         * but the buffer itself may have an internal offset.
1857                         */
1858                        frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1859                        frag_len = qm_sg_entry_get_len(&sgt[i]);
1860                        /* skb_add_rx_frag() does no checking on the page; if
1861                         * we pass it a tail page, we'll end up with
1862                         * bad page accounting and eventually with segfaults.
1863                         */
1864                        skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1865                                        frag_len, dpaa_bp->size);
1866                }
1867
1868                /* Update the pool count for the current {cpu x bpool} */
1869                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1870                (*count_ptr)--;
1871
1872                if (qm_sg_entry_is_final(&sgt[i]))
1873                        break;
1874        }
1875        WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1876
1877        /* free the SG table buffer */
1878        free_pages((unsigned long)vaddr, 0);
1879
1880        return skb;
1881
1882free_buffers:
1883        /* free all the SG entries */
1884        for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
1885                sg_addr = qm_sg_addr(&sgt[j]);
1886                sg_vaddr = phys_to_virt(sg_addr);
1887                /* all pages 0..i were unmapped */
1888                if (j > i)
1889                        dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
1890                                       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1891                free_pages((unsigned long)sg_vaddr, 0);
1892                /* counters 0..i-1 were decremented */
1893                if (j >= i) {
1894                        dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
1895                        if (dpaa_bp) {
1896                                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1897                                (*count_ptr)--;
1898                        }
1899                }
1900
1901                if (qm_sg_entry_is_final(&sgt[j]))
1902                        break;
1903        }
1904        /* free the SGT fragment */
1905        free_pages((unsigned long)vaddr, 0);
1906
1907        return NULL;
1908}
1909
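    /* Layout of a contiguous Tx buffer as built below:
     *   [ swbp | parse results | (rest of tx_headroom) | skb data ]
     * The software back-pointer (struct dpaa_eth_swbp) sits at the very start
     * of the headroom; the Parse Results area used for checksum offload starts
     * at offset DPAA_TX_PRIV_DATA_SIZE.
     */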
1910static int skb_to_contig_fd(struct dpaa_priv *priv,
1911                            struct sk_buff *skb, struct qm_fd *fd,
1912                            int *offset)
1913{
1914        struct net_device *net_dev = priv->net_dev;
1915        enum dma_data_direction dma_dir;
1916        struct dpaa_eth_swbp *swbp;
1917        unsigned char *buff_start;
1918        dma_addr_t addr;
1919        int err;
1920
1921        /* We are guaranteed to have at least tx_headroom bytes
1922         * available, so just use that for offset.
1923         */
1924        fd->bpid = FSL_DPAA_BPID_INV;
1925        buff_start = skb->data - priv->tx_headroom;
1926        dma_dir = DMA_TO_DEVICE;
1927
1928        swbp = (struct dpaa_eth_swbp *)buff_start;
1929        swbp->skb = skb;
1930
1931        /* Enable L3/L4 hardware checksum computation.
1932         *
1933         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1934         * need to write into the skb.
1935         */
1936        err = dpaa_enable_tx_csum(priv, skb, fd,
1937                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
1938        if (unlikely(err < 0)) {
1939                if (net_ratelimit())
1940                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1941                                  err);
1942                return err;
1943        }
1944
1945        /* Fill in the rest of the FD fields */
1946        qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1947        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1948
1949        /* Map the entire buffer size that may be seen by FMan, but no more */
1950        addr = dma_map_single(priv->tx_dma_dev, buff_start,
1951                              priv->tx_headroom + skb->len, dma_dir);
1952        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1953                if (net_ratelimit())
1954                        netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1955                return -EINVAL;
1956        }
1957        qm_fd_addr_set64(fd, addr);
1958
1959        return 0;
1960}
1961
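    /* For S/G frames, a separate page holds both the driver's private area
     * (back-pointer + parse results) and, at offset tx_headroom, the S/G
     * table: sgt[0] maps the skb linear part, sgt[1..nr_frags] map the page
     * fragments.
     */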
1962static int skb_to_sg_fd(struct dpaa_priv *priv,
1963                        struct sk_buff *skb, struct qm_fd *fd)
1964{
1965        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1966        const int nr_frags = skb_shinfo(skb)->nr_frags;
1967        struct net_device *net_dev = priv->net_dev;
1968        struct dpaa_eth_swbp *swbp;
1969        struct qm_sg_entry *sgt;
1970        void *buff_start;
1971        skb_frag_t *frag;
1972        dma_addr_t addr;
1973        size_t frag_len;
1974        struct page *p;
1975        int i, j, err;
1976
1977        /* get a page to store the S/G table */
1978        p = dev_alloc_pages(0);
1979        if (unlikely(!p)) {
1980                netdev_err(net_dev, "dev_alloc_pages() failed\n");
1981                return -ENOMEM;
1982        }
1983        buff_start = page_address(p);
1984
1985        /* Enable L3/L4 hardware checksum computation.
1986         *
1987         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1988         * need to write into the skb.
1989         */
1990        err = dpaa_enable_tx_csum(priv, skb, fd,
1991                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
1992        if (unlikely(err < 0)) {
1993                if (net_ratelimit())
1994                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1995                                  err);
1996                goto csum_failed;
1997        }
1998
1999        /* SGT[0] is used by the linear part */
2000        sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
2001        frag_len = skb_headlen(skb);
2002        qm_sg_entry_set_len(&sgt[0], frag_len);
2003        sgt[0].bpid = FSL_DPAA_BPID_INV;
2004        sgt[0].offset = 0;
2005        addr = dma_map_single(priv->tx_dma_dev, skb->data,
2006                              skb_headlen(skb), dma_dir);
2007        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2008                netdev_err(priv->net_dev, "DMA mapping failed\n");
2009                err = -EINVAL;
2010                goto sg0_map_failed;
2011        }
2012        qm_sg_entry_set64(&sgt[0], addr);
2013
2014        /* populate the rest of SGT entries */
2015        for (i = 0; i < nr_frags; i++) {
2016                frag = &skb_shinfo(skb)->frags[i];
2017                frag_len = skb_frag_size(frag);
2018                WARN_ON(!skb_frag_page(frag));
2019                addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
2020                                        frag_len, dma_dir);
2021                if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2022                        netdev_err(priv->net_dev, "DMA mapping failed\n");
2023                        err = -EINVAL;
2024                        goto sg_map_failed;
2025                }
2026
2027                qm_sg_entry_set_len(&sgt[i + 1], frag_len);
2028                sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
2029                sgt[i + 1].offset = 0;
2030
2031                /* keep the offset in the address */
2032                qm_sg_entry_set64(&sgt[i + 1], addr);
2033        }
2034
2035        /* Set the final bit in the last used entry of the SGT */
2036        qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
2037
2038        /* set fd offset to priv->tx_headroom */
2039        qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
2040
2041        /* DMA map the SGT page */
2042        swbp = (struct dpaa_eth_swbp *)buff_start;
2043        swbp->skb = skb;
2044
2045        addr = dma_map_page(priv->tx_dma_dev, p, 0,
2046                            priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
2047        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2048                netdev_err(priv->net_dev, "DMA mapping failed\n");
2049                err = -EINVAL;
2050                goto sgt_map_failed;
2051        }
2052
2053        fd->bpid = FSL_DPAA_BPID_INV;
2054        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2055        qm_fd_addr_set64(fd, addr);
2056
2057        return 0;
2058
2059sgt_map_failed:
2060sg_map_failed:
2061        for (j = 0; j < i; j++)
2062                dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
2063                               qm_sg_entry_get_len(&sgt[j]), dma_dir);
2064sg0_map_failed:
2065csum_failed:
2066        free_pages((unsigned long)buff_start, 0);
2067
2068        return err;
2069}
2070
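    /* Enqueue the frame on the egress FQ matching the Tx queue, retrying up to
     * DPAA_ENQUEUE_RETRIES times while QMan reports -EBUSY. For FDs not backed
     * by a buffer pool, the confirmation FQ's FQID is OR-ed into fd->cmd,
     * selecting the queue on which the Tx confirmation will arrive.
     */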
2071static inline int dpaa_xmit(struct dpaa_priv *priv,
2072                            struct rtnl_link_stats64 *percpu_stats,
2073                            int queue,
2074                            struct qm_fd *fd)
2075{
2076        struct qman_fq *egress_fq;
2077        int err, i;
2078
2079        egress_fq = priv->egress_fqs[queue];
2080        if (fd->bpid == FSL_DPAA_BPID_INV)
2081                fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2082
2083        /* Trace this Tx fd */
2084        trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2085
2086        for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2087                err = qman_enqueue(egress_fq, fd);
2088                if (err != -EBUSY)
2089                        break;
2090        }
2091
2092        if (unlikely(err < 0)) {
2093                percpu_stats->tx_fifo_errors++;
2094                return err;
2095        }
2096
2097        percpu_stats->tx_packets++;
2098        percpu_stats->tx_bytes += qm_fd_get_length(fd);
2099
2100        return 0;
2101}
2102
2103#ifdef CONFIG_DPAA_ERRATUM_A050385
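/* Workaround for erratum A050385: the linear data must start at an address
 * aligned to DPAA_A050385_ALIGN; for nonlinear skbs the linear length, every
 * fragment's offset and every fragment's size (except the last) must be
 * aligned too. skbs that don't comply are copied into a freshly allocated,
 * properly aligned linear buffer.
 */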
2104static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
2105{
2106        struct dpaa_priv *priv = netdev_priv(net_dev);
2107        struct sk_buff *new_skb, *skb = *s;
2108        unsigned char *start, i;
2109
2110        /* check linear buffer alignment */
2111        if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
2112                goto workaround;
2113
2114        /* linear buffers just need to have an aligned start */
2115        if (!skb_is_nonlinear(skb))
2116                return 0;
2117
2118        /* linear data size for nonlinear skbs needs to be aligned */
2119        if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
2120                goto workaround;
2121
2122        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2123                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2124
2125                /* all fragments need to have aligned start addresses */
2126                if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
2127                        goto workaround;
2128
2129                /* all but last fragment need to have aligned sizes */
2130                if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
2131                    (i < skb_shinfo(skb)->nr_frags - 1))
2132                        goto workaround;
2133        }
2134
2135        return 0;
2136
2137workaround:
2138        /* copy all the skb content into a new linear buffer */
2139        new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
2140                                                priv->tx_headroom);
2141        if (!new_skb)
2142                return -ENOMEM;
2143
2144        /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
2145        skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
2146
2147        /* Workaround for DPAA_A050385 requires data start to be aligned */
2148        start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
2149        if (start - new_skb->data)
2150                skb_reserve(new_skb, start - new_skb->data);
2151
2152        skb_put(new_skb, skb->len);
2153        skb_copy_bits(skb, 0, new_skb->data, skb->len);
2154        skb_copy_header(new_skb, skb);
2155        new_skb->dev = skb->dev;
2156
2157        /* Copy relevant timestamp info from the old skb to the new */
2158        if (priv->tx_tstamp) {
2159                skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
2160                skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
2161                skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
2162                if (skb->sk)
2163                        skb_set_owner_w(new_skb, skb->sk);
2164        }
2165
2166        /* We move the headroom when we align it so we have to reset the
2167         * network and transport header offsets relative to the new data
2168         * pointer. The checksum offload relies on these offsets.
2169         */
2170        skb_set_network_header(new_skb, skb_network_offset(skb));
2171        skb_set_transport_header(new_skb, skb_transport_offset(skb));
2172
2173        dev_kfree_skb(skb);
2174        *s = new_skb;
2175
2176        return 0;
2177}
2178
2179static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
2180                                struct xdp_frame **init_xdpf)
2181{
2182        struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
2183        void *new_buff, *aligned_data;
2184        struct page *p;
2185        u32 data_shift;
2186        int headroom;
2187
2188        /* Check the data alignment and make sure the headroom is large
2189         * enough to store the xdpf backpointer. Use an aligned headroom
2190         * value.
2191         *
2192         * Due to alignment constraints, we give XDP access to the full 256
2193         * byte frame headroom. If the XDP program uses all of it, copy the
2194         * data to a new buffer and make room for storing the backpointer.
2195         */
2196        if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
2197            xdpf->headroom >= priv->tx_headroom) {
2198                xdpf->headroom = priv->tx_headroom;
2199                return 0;
2200        }
2201
2202        /* Try to move the data inside the buffer just enough to align it and
2203         * store the xdpf backpointer. If the available headroom isn't large
2204         * enough, resort to allocating a new buffer and copying the data.
2205         */
2206        aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
2207        data_shift = xdpf->data - aligned_data;
2208
2209        /* The XDP frame's headroom needs to be large enough to accommodate
2210         * shifting the data as well as storing the xdpf backpointer.
2211         */
2212        if (xdpf->headroom >= data_shift + priv->tx_headroom) {
2213                memmove(aligned_data, xdpf->data, xdpf->len);
2214                xdpf->data = aligned_data;
2215                xdpf->headroom = priv->tx_headroom;
2216                return 0;
2217        }
2218
2219        /* The new xdp_frame is stored in the new buffer. Reserve enough space
2220         * in the headroom for storing it along with the driver's private
2221         * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
2222         * guarantee the data's alignment in the buffer.
2223         */
2224        headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
2225                         DPAA_FD_DATA_ALIGNMENT);
2226
2227        /* Ensure the extended headroom and data don't overflow the buffer,
2228         * while maintaining the mandatory tailroom.
2229         */
2230        if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
2231                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2232                return -ENOMEM;
2233
2234        p = dev_alloc_pages(0);
2235        if (unlikely(!p))
2236                return -ENOMEM;
2237
2238        /* Copy the data to the new buffer at a properly aligned offset */
2239        new_buff = page_address(p);
2240        memcpy(new_buff + headroom, xdpf->data, xdpf->len);
2241
2242        /* Create an XDP frame around the new buffer in a similar fashion
2243         * to xdp_convert_buff_to_frame.
2244         */
2245        new_xdpf = new_buff;
2246        new_xdpf->data = new_buff + headroom;
2247        new_xdpf->len = xdpf->len;
2248        new_xdpf->headroom = priv->tx_headroom;
2249        new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
2250        new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
2251
2252        /* Release the initial buffer */
2253        xdp_return_frame_rx_napi(xdpf);
2254
2255        *init_xdpf = new_xdpf;
2256        return 0;
2257}
2258#endif
2259
2260static netdev_tx_t
2261dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2262{
2263        const int queue_mapping = skb_get_queue_mapping(skb);
2264        bool nonlinear = skb_is_nonlinear(skb);
2265        struct rtnl_link_stats64 *percpu_stats;
2266        struct dpaa_percpu_priv *percpu_priv;
2267        struct netdev_queue *txq;
2268        struct dpaa_priv *priv;
2269        struct qm_fd fd;
2270        int offset = 0;
2271        int err = 0;
2272
2273        priv = netdev_priv(net_dev);
2274        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2275        percpu_stats = &percpu_priv->stats;
2276
2277        qm_fd_clear_fd(&fd);
2278
2279        if (!nonlinear) {
2280                /* We're going to store the skb backpointer at the beginning
2281                 * of the data buffer, so we need a privately owned skb.
2282                 *
2283                 * We've already made sure the skb is not shared via dev->priv_flags;
2284                 * we still need to verify that the skb head is not cloned.
2285                 */
2286                if (skb_cow_head(skb, priv->tx_headroom))
2287                        goto enomem;
2288
2289                WARN_ON(skb_is_nonlinear(skb));
2290        }
2291
2292        /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2293         * make sure we don't feed FMan with more fragments than it supports.
2294         */
2295        if (unlikely(nonlinear &&
2296                     (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2297                /* If the egress skb contains more fragments than we support
2298                 * we have no choice but to linearize it ourselves.
2299                 */
2300                if (__skb_linearize(skb))
2301                        goto enomem;
2302
2303                nonlinear = skb_is_nonlinear(skb);
2304        }
2305
2306#ifdef CONFIG_DPAA_ERRATUM_A050385
2307        if (unlikely(fman_has_errata_a050385())) {
2308                if (dpaa_a050385_wa_skb(net_dev, &skb))
2309                        goto enomem;
2310                nonlinear = skb_is_nonlinear(skb);
2311        }
2312#endif
2313
2314        if (nonlinear) {
2315                /* Just create a S/G fd based on the skb */
2316                err = skb_to_sg_fd(priv, skb, &fd);
2317                percpu_priv->tx_frag_skbuffs++;
2318        } else {
2319                /* Create a contig FD from this skb */
2320                err = skb_to_contig_fd(priv, skb, &fd, &offset);
2321        }
2322        if (unlikely(err < 0))
2323                goto skb_to_fd_failed;
2324
2325        txq = netdev_get_tx_queue(net_dev, queue_mapping);
2326
2327        /* LLTX requires us to do our own update of trans_start */
2328        txq->trans_start = jiffies;
2329
2330        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2331                fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2332                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2333        }
2334
2335        if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2336                return NETDEV_TX_OK;
2337
2338        dpaa_cleanup_tx_fd(priv, &fd, false);
2339skb_to_fd_failed:
2340enomem:
2341        percpu_stats->tx_errors++;
2342        dev_kfree_skb(skb);
2343        return NETDEV_TX_OK;
2344}
2345
2346static void dpaa_rx_error(struct net_device *net_dev,
2347                          const struct dpaa_priv *priv,
2348                          struct dpaa_percpu_priv *percpu_priv,
2349                          const struct qm_fd *fd,
2350                          u32 fqid)
2351{
2352        if (net_ratelimit())
2353                netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2354                          be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2355
2356        percpu_priv->stats.rx_errors++;
2357
2358        if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2359                percpu_priv->rx_errors.dme++;
2360        if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2361                percpu_priv->rx_errors.fpe++;
2362        if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2363                percpu_priv->rx_errors.fse++;
2364        if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2365                percpu_priv->rx_errors.phe++;
2366
2367        dpaa_fd_release(net_dev, fd);
2368}
2369
2370static void dpaa_tx_error(struct net_device *net_dev,
2371                          const struct dpaa_priv *priv,
2372                          struct dpaa_percpu_priv *percpu_priv,
2373                          const struct qm_fd *fd,
2374                          u32 fqid)
2375{
2376        struct sk_buff *skb;
2377
2378        if (net_ratelimit())
2379                netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2380                           be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2381
2382        percpu_priv->stats.tx_errors++;
2383
2384        skb = dpaa_cleanup_tx_fd(priv, fd, false);
2385        dev_kfree_skb(skb);
2386}
2387
2388static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2389{
2390        struct dpaa_napi_portal *np =
2391                        container_of(napi, struct dpaa_napi_portal, napi);
2392        int cleaned;
2393
2394        np->xdp_act = 0;
2395
2396        cleaned = qman_p_poll_dqrr(np->p, budget);
2397
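            /* Re-enable the portal's dequeue-ready interrupt once the poll
             * budget is not exhausted, or when the interface is going down,
             * so the IRQ source is not left disabled.
             */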
2398        if (cleaned < budget) {
2399                napi_complete_done(napi, cleaned);
2400                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2401        } else if (np->down) {
2402                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2403        }
2404
2405        if (np->xdp_act & XDP_REDIRECT)
2406                xdp_do_flush();
2407
2408        return cleaned;
2409}
2410
2411static void dpaa_tx_conf(struct net_device *net_dev,
2412                         const struct dpaa_priv *priv,
2413                         struct dpaa_percpu_priv *percpu_priv,
2414                         const struct qm_fd *fd,
2415                         u32 fqid)
2416{
2417        struct sk_buff  *skb;
2418
2419        if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2420                if (net_ratelimit())
2421                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2422                                   be32_to_cpu(fd->status) &
2423                                   FM_FD_STAT_TX_ERRORS);
2424
2425                percpu_priv->stats.tx_errors++;
2426        }
2427
2428        percpu_priv->tx_confirm++;
2429
2430        skb = dpaa_cleanup_tx_fd(priv, fd, true);
2431
2432        consume_skb(skb);
2433}
2434
2435static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2436                                         struct qman_portal *portal, bool sched_napi)
2437{
2438        if (sched_napi) {
2439                /* Disable QMan IRQ and invoke NAPI */
2440                qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2441
2442                percpu_priv->np.p = portal;
2443                napi_schedule(&percpu_priv->np.napi);
2444                percpu_priv->in_interrupt++;
2445                return 1;
2446        }
2447        return 0;
2448}
2449
2450static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2451                                              struct qman_fq *fq,
2452                                              const struct qm_dqrr_entry *dq,
2453                                              bool sched_napi)
2454{
2455        struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2456        struct dpaa_percpu_priv *percpu_priv;
2457        struct net_device *net_dev;
2458        struct dpaa_bp *dpaa_bp;
2459        struct dpaa_priv *priv;
2460
2461        net_dev = dpaa_fq->net_dev;
2462        priv = netdev_priv(net_dev);
2463        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2464        if (!dpaa_bp)
2465                return qman_cb_dqrr_consume;
2466
2467        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2468
2469        if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2470                return qman_cb_dqrr_stop;
2471
2472        dpaa_eth_refill_bpools(priv);
2473        dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2474
2475        return qman_cb_dqrr_consume;
2476}
2477
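    /* Transmit a single XDP frame: the xdp_frame pointer is stored in the
     * buffer headroom (swbp->skb stays NULL, so the Tx confirmation path can
     * tell this is not an skb), then a contiguous FD is built and enqueued on
     * the egress FQ indexed by the current CPU.
     */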
2478static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
2479                               struct xdp_frame *xdpf)
2480{
2481        struct dpaa_priv *priv = netdev_priv(net_dev);
2482        struct rtnl_link_stats64 *percpu_stats;
2483        struct dpaa_percpu_priv *percpu_priv;
2484        struct dpaa_eth_swbp *swbp;
2485        struct netdev_queue *txq;
2486        void *buff_start;
2487        struct qm_fd fd;
2488        dma_addr_t addr;
2489        int err;
2490
2491        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2492        percpu_stats = &percpu_priv->stats;
2493
2494#ifdef CONFIG_DPAA_ERRATUM_A050385
2495        if (unlikely(fman_has_errata_a050385())) {
2496                if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
2497                        err = -ENOMEM;
2498                        goto out_error;
2499                }
2500        }
2501#endif
2502
2503        if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
2504                err = -EINVAL;
2505                goto out_error;
2506        }
2507
2508        buff_start = xdpf->data - xdpf->headroom;
2509
2510        /* Leave the skb backpointer at the start of the buffer empty.
2511         * Save the XDP frame for easy cleanup on confirmation.
2512         */
2513        swbp = (struct dpaa_eth_swbp *)buff_start;
2514        swbp->skb = NULL;
2515        swbp->xdpf = xdpf;
2516
2517        qm_fd_clear_fd(&fd);
2518        fd.bpid = FSL_DPAA_BPID_INV;
2519        fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2520        qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
2521
2522        addr = dma_map_single(priv->tx_dma_dev, buff_start,
2523                              xdpf->headroom + xdpf->len,
2524                              DMA_TO_DEVICE);
2525        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2526                err = -EINVAL;
2527                goto out_error;
2528        }
2529
2530        qm_fd_addr_set64(&fd, addr);
2531
2532        /* Bump the trans_start */
2533        txq = netdev_get_tx_queue(net_dev, smp_processor_id());
2534        txq->trans_start = jiffies;
2535
2536        err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
2537        if (err) {
2538                dma_unmap_single(priv->tx_dma_dev, addr,
2539                                 qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
2540                                 DMA_TO_DEVICE);
2541                goto out_error;
2542        }
2543
2544        return 0;
2545
2546out_error:
2547        percpu_stats->tx_errors++;
2548        return err;
2549}
2550
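    /* Run the attached XDP program on a contiguous Rx frame and return its
     * verdict. The FD's offset and length are updated to reflect any head or
     * tail adjustments made by the program, so a subsequent skb build sees
     * the modified frame.
     */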
2551static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
2552                        struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
2553{
2554        ssize_t fd_off = qm_fd_get_offset(fd);
2555        struct bpf_prog *xdp_prog;
2556        struct xdp_frame *xdpf;
2557        struct xdp_buff xdp;
2558        u32 xdp_act;
2559        int err;
2560
2561        xdp_prog = READ_ONCE(priv->xdp_prog);
2562        if (!xdp_prog)
2563                return XDP_PASS;
2564
2565        xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
2566                      &dpaa_fq->xdp_rxq);
2567        xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
2568                         XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
2569
2570        /* We reserve a fixed headroom of 256 bytes under the erratum and we
2571         * offer it all to XDP programs to use. If no room is left for the
2572         * xdpf backpointer on TX, we will need to copy the data.
2573         * Disable metadata support since data realignments might be required
2574         * and the information can be lost.
2575         */
2576#ifdef CONFIG_DPAA_ERRATUM_A050385
2577        if (unlikely(fman_has_errata_a050385())) {
2578                xdp_set_data_meta_invalid(&xdp);
2579                xdp.data_hard_start = vaddr;
2580                xdp.frame_sz = DPAA_BP_RAW_SIZE;
2581        }
2582#endif
2583
2584        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2585
2586        /* Update the length and the offset of the FD */
2587        qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
2588
2589        switch (xdp_act) {
2590        case XDP_PASS:
2591#ifdef CONFIG_DPAA_ERRATUM_A050385
2592                *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
2593                                xdp.data - xdp.data_meta;
2594#else
2595                *xdp_meta_len = xdp.data - xdp.data_meta;
2596#endif
2597                break;
2598        case XDP_TX:
2599                /* We can access the full headroom when sending the frame
2600                 * back out
2601                 */
2602                xdp.data_hard_start = vaddr;
2603                xdp.frame_sz = DPAA_BP_RAW_SIZE;
2604                xdpf = xdp_convert_buff_to_frame(&xdp);
2605                if (unlikely(!xdpf)) {
2606                        free_pages((unsigned long)vaddr, 0);
2607                        break;
2608                }
2609
2610                if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
2611                        xdp_return_frame_rx_napi(xdpf);
2612
2613                break;
2614        case XDP_REDIRECT:
2615                /* Allow redirect to use the full headroom */
2616                xdp.data_hard_start = vaddr;
2617                xdp.frame_sz = DPAA_BP_RAW_SIZE;
2618
2619                err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
2620                if (err) {
2621                        trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2622                        free_pages((unsigned long)vaddr, 0);
2623                }
2624                break;
2625        default:
2626                bpf_warn_invalid_xdp_action(xdp_act);
2627                fallthrough;
2628        case XDP_ABORTED:
2629                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2630                fallthrough;
2631        case XDP_DROP:
2632                /* Free the buffer */
2633                free_pages((unsigned long)vaddr, 0);
2634                break;
2635        }
2636
2637        return xdp_act;
2638}
2639
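    /* Main Rx handler, called from the QMan portal for each frame dequeued on
     * the default Rx FQ: refills the buffer pools, validates the FD, extracts
     * the timestamp and hash from the headroom, runs XDP on contiguous frames
     * and finally hands the resulting skb to the network stack.
     */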
2640static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2641                                                struct qman_fq *fq,
2642                                                const struct qm_dqrr_entry *dq,
2643                                                bool sched_napi)
2644{
2645        bool ts_valid = false, hash_valid = false;
2646        struct skb_shared_hwtstamps *shhwtstamps;
2647        unsigned int skb_len, xdp_meta_len = 0;
2648        struct rtnl_link_stats64 *percpu_stats;
2649        struct dpaa_percpu_priv *percpu_priv;
2650        const struct qm_fd *fd = &dq->fd;
2651        dma_addr_t addr = qm_fd_addr(fd);
2652        struct dpaa_napi_portal *np;
2653        enum qm_fd_format fd_format;
2654        struct net_device *net_dev;
2655        u32 fd_status, hash_offset;
2656        struct qm_sg_entry *sgt;
2657        struct dpaa_bp *dpaa_bp;
2658        struct dpaa_fq *dpaa_fq;
2659        struct dpaa_priv *priv;
2660        struct sk_buff *skb;
2661        int *count_ptr;
2662        u32 xdp_act;
2663        void *vaddr;
2664        u32 hash;
2665        u64 ns;
2666
2667        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2668        fd_status = be32_to_cpu(fd->status);
2669        fd_format = qm_fd_get_format(fd);
2670        net_dev = dpaa_fq->net_dev;
2671        priv = netdev_priv(net_dev);
2672        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2673        if (!dpaa_bp)
2674                return qman_cb_dqrr_consume;
2675
2676        /* Trace the Rx fd */
2677        trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2678
2679        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2680        percpu_stats = &percpu_priv->stats;
2681        np = &percpu_priv->np;
2682
2683        if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
2684                return qman_cb_dqrr_stop;
2685
2686        /* Make sure we didn't run out of buffers */
2687        if (unlikely(dpaa_eth_refill_bpools(priv))) {
2688                /* Unable to refill the buffer pool due to insufficient
2689                 * system memory. Just release the frame back into the pool,
2690                 * otherwise we'll soon end up with an empty buffer pool.
2691                 */
2692                dpaa_fd_release(net_dev, &dq->fd);
2693                return qman_cb_dqrr_consume;
2694        }
2695
2696        if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
2697                if (net_ratelimit())
2698                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2699                                   fd_status & FM_FD_STAT_RX_ERRORS);
2700
2701                percpu_stats->rx_errors++;
2702                dpaa_fd_release(net_dev, fd);
2703                return qman_cb_dqrr_consume;
2704        }
2705
2706        dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
2707                       DMA_FROM_DEVICE);
2708
2709        /* prefetch the first 64 bytes of the frame or the SGT start */
2710        vaddr = phys_to_virt(addr);
2711        prefetch(vaddr + qm_fd_get_offset(fd));
2712
2713        /* The only FD types that we may receive are contig and S/G */
2714        WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2715
2716        /* Account for either the contig buffer or the SGT buffer (depending on
2717         * which case we were in) having been removed from the pool.
2718         */
2719        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2720        (*count_ptr)--;
2721
2722        /* Extract the timestamp stored in the headroom before running XDP */
2723        if (priv->rx_tstamp) {
2724                if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
2725                        ts_valid = true;
2726                else
2727                        WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
2728        }
2729
2730        /* Extract the hash stored in the headroom before running XDP */
2731        if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2732            !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2733                                              &hash_offset)) {
2734                hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
2735                hash_valid = true;
2736        }
2737
2738        if (likely(fd_format == qm_fd_contig)) {
2739                xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
2740                                       dpaa_fq, &xdp_meta_len);
2741                np->xdp_act |= xdp_act;
2742                if (xdp_act != XDP_PASS) {
2743                        percpu_stats->rx_packets++;
2744                        percpu_stats->rx_bytes += qm_fd_get_length(fd);
2745                        return qman_cb_dqrr_consume;
2746                }
2747                skb = contig_fd_to_skb(priv, fd);
2748        } else {
2749                /* XDP doesn't support S/G frames. Return the fragments to the
2750                 * buffer pool and release the SGT.
2751                 */
2752                if (READ_ONCE(priv->xdp_prog)) {
2753                        WARN_ONCE(1, "S/G frames not supported under XDP\n");
2754                        sgt = vaddr + qm_fd_get_offset(fd);
2755                        dpaa_release_sgt_members(sgt);
2756                        free_pages((unsigned long)vaddr, 0);
2757                        return qman_cb_dqrr_consume;
2758                }
2759                skb = sg_fd_to_skb(priv, fd);
2760        }
2761        if (!skb)
2762                return qman_cb_dqrr_consume;
2763
2764        if (xdp_meta_len)
2765                skb_metadata_set(skb, xdp_meta_len);
2766
2767        /* Set the previously extracted timestamp */
2768        if (ts_valid) {
2769                shhwtstamps = skb_hwtstamps(skb);
2770                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2771                shhwtstamps->hwtstamp = ns_to_ktime(ns);
2772        }
2773
2774        skb->protocol = eth_type_trans(skb, net_dev);
2775
2776        /* Set the previously extracted hash */
2777        if (hash_valid) {
2778                enum pkt_hash_types type;
2779
2780                /* if L4 exists, it was used in the hash generation */
2781                type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2782                        PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2783                skb_set_hash(skb, hash, type);
2784        }
2785
2786        skb_len = skb->len;
2787
2788        if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2789                percpu_stats->rx_dropped++;
2790                return qman_cb_dqrr_consume;
2791        }
2792
2793        percpu_stats->rx_packets++;
2794        percpu_stats->rx_bytes += skb_len;
2795
2796        return qman_cb_dqrr_consume;
2797}
2798
2799static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2800                                                struct qman_fq *fq,
2801                                                const struct qm_dqrr_entry *dq,
2802                                                bool sched_napi)
2803{
2804        struct dpaa_percpu_priv *percpu_priv;
2805        struct net_device *net_dev;
2806        struct dpaa_priv *priv;
2807
2808        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2809        priv = netdev_priv(net_dev);
2810
2811        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2812
2813        if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2814                return qman_cb_dqrr_stop;
2815
2816        dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2817
2818        return qman_cb_dqrr_consume;
2819}
2820
2821static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2822                                               struct qman_fq *fq,
2823                                               const struct qm_dqrr_entry *dq,
2824                                               bool sched_napi)
2825{
2826        struct dpaa_percpu_priv *percpu_priv;
2827        struct net_device *net_dev;
2828        struct dpaa_priv *priv;
2829
2830        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2831        priv = netdev_priv(net_dev);
2832
2833        /* Trace the fd */
2834        trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2835
2836        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2837
2838        if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2839                return qman_cb_dqrr_stop;
2840
2841        dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2842
2843        return qman_cb_dqrr_consume;
2844}
2845
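/* Enqueue Rejection Notification (ERN) callback for the egress FQs: QMan
 * rejected the enqueue (e.g. due to congestion), so update the drop
 * statistics, count the ERN cause and free the associated skb.
 */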
2846static void egress_ern(struct qman_portal *portal,
2847                       struct qman_fq *fq,
2848                       const union qm_mr_entry *msg)
2849{
2850        const struct qm_fd *fd = &msg->ern.fd;
2851        struct dpaa_percpu_priv *percpu_priv;
2852        const struct dpaa_priv *priv;
2853        struct net_device *net_dev;
2854        struct sk_buff *skb;
2855
2856        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2857        priv = netdev_priv(net_dev);
2858        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2859
2860        percpu_priv->stats.tx_dropped++;
2861        percpu_priv->stats.tx_fifo_errors++;
2862        count_ern(percpu_priv, msg);
2863
2864        skb = dpaa_cleanup_tx_fd(priv, fd, false);
2865        dev_kfree_skb_any(skb);
2866}
2867
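/* Dequeue and ERN callbacks wired to each frame queue type: Rx default and
 * Rx error, Tx confirmation (default and error), plus the egress ERN handler
 * above.
 */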
2868static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2869        .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2870        .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2871        .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2872        .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2873        .egress_ern = { .cb = { .ern = egress_ern } }
2874};
2875
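/* Toggle the per-CPU 'down' flag and the NAPI state for every online CPU;
 * called on ifup/ifdown from dpaa_open() and dpaa_eth_stop().
 */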
2876static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2877{
2878        struct dpaa_percpu_priv *percpu_priv;
2879        int i;
2880
2881        for_each_online_cpu(i) {
2882                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2883
2884                percpu_priv->np.down = false;
2885                napi_enable(&percpu_priv->np.napi);
2886        }
2887}
2888
2889static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2890{
2891        struct dpaa_percpu_priv *percpu_priv;
2892        int i;
2893
2894        for_each_online_cpu(i) {
2895                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2896
2897                percpu_priv->np.down = true;
2898                napi_disable(&percpu_priv->np.napi);
2899        }
2900}
2901
2902static void dpaa_adjust_link(struct net_device *net_dev)
2903{
2904        struct mac_device *mac_dev;
2905        struct dpaa_priv *priv;
2906
2907        priv = netdev_priv(net_dev);
2908        mac_dev = priv->mac_dev;
2909        mac_dev->adjust_link(mac_dev);
2910}
2911
2912/* The Aquantia PHYs are capable of performing rate adaptation */
2913#define PHY_VEND_AQUANTIA       0x03a1b400
2914
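/* Connect to the PHY described in the device tree and restrict its supported
 * link modes to those of the controller, unless the PHY is an XGMII-attached
 * Aquantia part that can rate-adapt; asymmetric pause is advertised either
 * way.
 */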
2915static int dpaa_phy_init(struct net_device *net_dev)
2916{
2917        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2918        struct mac_device *mac_dev;
2919        struct phy_device *phy_dev;
2920        struct dpaa_priv *priv;
2921
2922        priv = netdev_priv(net_dev);
2923        mac_dev = priv->mac_dev;
2924
2925        phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
2926                                 &dpaa_adjust_link, 0,
2927                                 mac_dev->phy_if);
2928        if (!phy_dev) {
2929                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2930                return -ENODEV;
2931        }
2932
2933        /* Limit the PHY to the MAC's supported modes, unless it can do rate adaptation */
2934        if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
2935            ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
2936                /* remove any features not supported by the controller */
2937                ethtool_convert_legacy_u32_to_link_mode(mask,
2938                                                        mac_dev->if_support);
2939                linkmode_and(phy_dev->supported, phy_dev->supported, mask);
2940        }
2941
2942        phy_support_asym_pause(phy_dev);
2943
2944        mac_dev->phy_dev = phy_dev;
2945        net_dev->phydev = phy_dev;
2946
2947        return 0;
2948}
2949
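/* ndo_open: enable NAPI, connect the PHY, enable the FMan ports and start the
 * MAC before waking the Tx queues; each step is unwound on failure.
 */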
2950static int dpaa_open(struct net_device *net_dev)
2951{
2952        struct mac_device *mac_dev;
2953        struct dpaa_priv *priv;
2954        int err, i;
2955
2956        priv = netdev_priv(net_dev);
2957        mac_dev = priv->mac_dev;
2958        dpaa_eth_napi_enable(priv);
2959
2960        err = dpaa_phy_init(net_dev);
2961        if (err)
2962                goto phy_init_failed;
2963
2964        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2965                err = fman_port_enable(mac_dev->port[i]);
2966                if (err)
2967                        goto mac_start_failed;
2968        }
2969
2970        err = priv->mac_dev->start(mac_dev);
2971        if (err < 0) {
2972                netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
2973                goto mac_start_failed;
2974        }
2975
2976        netif_tx_start_all_queues(net_dev);
2977
2978        return 0;
2979
2980mac_start_failed:
2981        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2982                fman_port_disable(mac_dev->port[i]);
2983
2984phy_init_failed:
2985        dpaa_eth_napi_disable(priv);
2986
2987        return err;
2988}
2989
2990static int dpaa_eth_stop(struct net_device *net_dev)
2991{
2992        struct dpaa_priv *priv;
2993        int err;
2994
2995        err = dpaa_stop(net_dev);
2996
2997        priv = netdev_priv(net_dev);
2998        dpaa_eth_napi_disable(priv);
2999
3000        return err;
3001}
3002
3003static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
3004{
3005        int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
3006
3007        /* We do not support S/G fragments when XDP is enabled, so limit the MTU
3008         * to what fits in a single buffer.
3009         */
3010        if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
3011                dev_warn(priv->net_dev->dev.parent,
3012                         "The maximum MTU for XDP is %d\n",
3013                         max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
3014                return false;
3015        }
3016
3017        return true;
3018}
3019
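/* ndo_change_mtu: with an XDP program attached, reject MTUs that would not
 * fit in a single buffer. From xdp_validate_mtu() above, the largest
 * acceptable value is:
 *   dpaa_bp->size - rx_headroom - VLAN_ETH_HLEN - ETH_FCS_LEN
 * The exact number depends on the configured buffer size and Rx headroom.
 */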
3020static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
3021{
3022        struct dpaa_priv *priv = netdev_priv(net_dev);
3023
3024        if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
3025                return -EINVAL;
3026
3027        net_dev->mtu = new_mtu;
3028        return 0;
3029}
3030
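/* Attach or detach an XDP program (ndo_bpf, XDP_SETUP_PROG): validate the MTU
 * against the single-buffer limit, quiesce the interface if it is running,
 * swap the program pointer, drop the reference on the old program and bring
 * the interface back up.
 */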
3031static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
3032{
3033        struct dpaa_priv *priv = netdev_priv(net_dev);
3034        struct bpf_prog *old_prog;
3035        int err;
3036        bool up;
3037
3038        /* S/G fragments are not supported in XDP-mode */
3039        if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
3040                NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
3041                return -EINVAL;
3042        }
3043
3044        up = netif_running(net_dev);
3045
3046        if (up)
3047                dpaa_eth_stop(net_dev);
3048
3049        old_prog = xchg(&priv->xdp_prog, bpf->prog);
3050        if (old_prog)
3051                bpf_prog_put(old_prog);
3052
3053        if (up) {
3054                err = dpaa_open(net_dev);
3055                if (err) {
3056                        NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
3057                        return err;
3058                }
3059        }
3060
3061        return 0;
3062}
3063
3064static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
3065{
3066        switch (xdp->command) {
3067        case XDP_SETUP_PROG:
3068                return dpaa_setup_xdp(net_dev, xdp);
3069        default:
3070                return -EINVAL;
3071        }
3072}
3073
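/* ndo_xdp_xmit: transmit frames redirected to this interface. Returns the
 * number of frames accepted for transmission; the XDP core is expected to
 * free any frames that were not accepted.
 */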
3074static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
3075                         struct xdp_frame **frames, u32 flags)
3076{
3077        struct xdp_frame *xdpf;
3078        int i, nxmit = 0;
3079
3080        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3081                return -EINVAL;
3082
3083        if (!netif_running(net_dev))
3084                return -ENETDOWN;
3085
3086        for (i = 0; i < n; i++) {
3087                xdpf = frames[i];
3088                if (dpaa_xdp_xmit_frame(net_dev, xdpf))
3089                        break;
3090                nxmit++;
3091        }
3092
3093        return nxmit;
3094}
3095
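/* SIOCSHWTSTAMP handler. For illustration only (not part of this driver), a
 * userspace application would typically enable hardware timestamping roughly
 * as follows, assuming the interface is named "eth0":
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_ALL,
 *   };
 *   struct ifreq ifr = { 0 };
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that the hardware cannot disable Rx and Tx timestamping independently,
 * so requesting either one enables timestamping in the MAC.
 */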
3096static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3097{
3098        struct dpaa_priv *priv = netdev_priv(dev);
3099        struct hwtstamp_config config;
3100
3101        if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3102                return -EFAULT;
3103
3104        switch (config.tx_type) {
3105        case HWTSTAMP_TX_OFF:
3106                /* Rx and Tx timestamping cannot be disabled separately in
3107                 * hardware; only clear the Tx flag here.
3108                 */
3109                priv->tx_tstamp = false;
3110                break;
3111        case HWTSTAMP_TX_ON:
3112                priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3113                priv->tx_tstamp = true;
3114                break;
3115        default:
3116                return -ERANGE;
3117        }
3118
3119        if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3120                /* Rx and Tx timestamping cannot be disabled separately in
3121                 * hardware; only clear the Rx flag here.
3122                 */
3123                priv->rx_tstamp = false;
3124        } else {
3125                priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3126                priv->rx_tstamp = true;
3127                /* TS is set for all frame types, not only those requested */
3128                config.rx_filter = HWTSTAMP_FILTER_ALL;
3129        }
3130
3131        return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3132                        -EFAULT : 0;
3133}
3134
3135static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
3136{
3137        int ret = -EINVAL;
3138
3139        if (cmd == SIOCGMIIREG) {
3140                if (net_dev->phydev)
3141                        return phy_mii_ioctl(net_dev->phydev, rq, cmd);
3142        }
3143
3144        if (cmd == SIOCSHWTSTAMP)
3145                return dpaa_ts_ioctl(net_dev, rq, cmd);
3146
3147        return ret;
3148}
3149
3150static const struct net_device_ops dpaa_ops = {
3151        .ndo_open = dpaa_open,
3152        .ndo_start_xmit = dpaa_start_xmit,
3153        .ndo_stop = dpaa_eth_stop,
3154        .ndo_tx_timeout = dpaa_tx_timeout,
3155        .ndo_get_stats64 = dpaa_get_stats64,
3156        .ndo_change_carrier = fixed_phy_change_carrier,
3157        .ndo_set_mac_address = dpaa_set_mac_address,
3158        .ndo_validate_addr = eth_validate_addr,
3159        .ndo_set_rx_mode = dpaa_set_rx_mode,
3160        .ndo_eth_ioctl = dpaa_ioctl,
3161        .ndo_setup_tc = dpaa_setup_tc,
3162        .ndo_change_mtu = dpaa_change_mtu,
3163        .ndo_bpf = dpaa_xdp,
3164        .ndo_xdp_xmit = dpaa_xdp_xmit,
3165};
3166
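/* Register one NAPI context per possible CPU, matching the per-CPU private
 * data; dpaa_napi_del() removes them again on probe failure or device removal.
 */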
3167static int dpaa_napi_add(struct net_device *net_dev)
3168{
3169        struct dpaa_priv *priv = netdev_priv(net_dev);
3170        struct dpaa_percpu_priv *percpu_priv;
3171        int cpu;
3172
3173        for_each_possible_cpu(cpu) {
3174                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3175
3176                netif_napi_add(net_dev, &percpu_priv->np.napi,
3177                               dpaa_eth_poll, NAPI_POLL_WEIGHT);
3178        }
3179
3180        return 0;
3181}
3182
3183static void dpaa_napi_del(struct net_device *net_dev)
3184{
3185        struct dpaa_priv *priv = netdev_priv(net_dev);
3186        struct dpaa_percpu_priv *percpu_priv;
3187        int cpu;
3188
3189        for_each_possible_cpu(cpu) {
3190                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3191
3192                netif_napi_del(&percpu_priv->np.napi);
3193        }
3194}
3195
3196static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
3197                                   struct bm_buffer *bmb)
3198{
3199        dma_addr_t addr = bm_buf_addr(bmb);
3200
3201        dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
3202                       DMA_FROM_DEVICE);
3203
3204        skb_free_frag(phys_to_virt(addr));
3205}
3206
3207/* Allocate the dpaa_bp struct and configure its default values */
3208static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
3209{
3210        struct dpaa_bp *dpaa_bp;
3211
3212        dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
3213        if (!dpaa_bp)
3214                return ERR_PTR(-ENOMEM);
3215
3216        dpaa_bp->bpid = FSL_DPAA_BPID_INV;
3217        dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
3218        if (!dpaa_bp->percpu_count)
3219                return ERR_PTR(-ENOMEM);
3220
3221        dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
3222
3223        dpaa_bp->seed_cb = dpaa_bp_seed;
3224        dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
3225
3226        return dpaa_bp;
3227}
3228
3229/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
3230 * We won't be sending congestion notifications to FMan; for now, we just use
3231 * this CGR to generate enqueue rejections to FMan in order to drop the frames
3232 * before they reach our ingress queues and eat up memory.
3233 */
3234static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
3235{
3236        struct qm_mcc_initcgr initcgr;
3237        u32 cs_th;
3238        int err;
3239
3240        err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
3241        if (err < 0) {
3242                if (netif_msg_drv(priv))
3243                        pr_err("Error %d allocating CGR ID\n", err);
3244                goto out_error;
3245        }
3246
3247        /* Enable CS TD, but disable Congestion State Change Notifications. */
3248        memset(&initcgr, 0, sizeof(initcgr));
3249        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
3250        initcgr.cgr.cscn_en = QM_CGR_EN;
3251        cs_th = DPAA_INGRESS_CS_THRESHOLD;
3252        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
3253
3254        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
3255        initcgr.cgr.cstd_en = QM_CGR_EN;
3256
3257        /* This CGR will be associated with the software portal (SWP) affine to
3258         * the current CPU; nevertheless, all of our ingress FQs are placed in it.
3259         */
3260        err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
3261                              &initcgr);
3262        if (err < 0) {
3263                if (netif_msg_drv(priv))
3264                        pr_err("Error %d creating ingress CGR with ID %d\n",
3265                               err, priv->ingress_cgr.cgrid);
3266                qman_release_cgrid(priv->ingress_cgr.cgrid);
3267                goto out_error;
3268        }
3269        if (netif_msg_drv(priv))
3270                pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
3271                         priv->ingress_cgr.cgrid, priv->mac_dev->addr);
3272
3273        priv->use_ingress_cgr = true;
3274
3275out_error:
3276        return err;
3277}
3278
3279static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
3280                             enum port_type port)
3281{
3282        u16 headroom;
3283
3284        /* The frame headroom must accommodate:
3285         * - the driver private data area
3286         * - parse results, hash results, timestamp if selected
3287         * If either hash results or the timestamp is selected, both are copied
3288         * to/from the frame headroom: the timestamp (TS) sits between the parse
3289         * results (PR) and hash results (HR) in the internal context (IC), whose
3290         * copy size has a 16-byte granularity (see FMBM_RICP and FMBM_TICP in the DPAARM)
3291         *
3292         * Also make sure the headroom is a multiple of data_align bytes
3293         */
3294        headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
3295
3296        if (port == RX) {
3297#ifdef CONFIG_DPAA_ERRATUM_A050385
3298                if (unlikely(fman_has_errata_a050385()))
3299                        headroom = XDP_PACKET_HEADROOM;
3300#endif
3301
3302                return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
3303        } else {
3304                return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
3305        }
3306}
3307
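/* Probe one DPAA interface: defer until the QMan/BMan drivers and their
 * portals have probed, then allocate the netdev, set the 40-bit DMA masks,
 * create the buffer pool, frame queues and congestion groups, initialize the
 * FMan ports and NAPI, and finally register the net device.
 */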
3308static int dpaa_eth_probe(struct platform_device *pdev)
3309{
3310        struct net_device *net_dev = NULL;
3311        struct dpaa_bp *dpaa_bp = NULL;
3312        struct dpaa_fq *dpaa_fq, *tmp;
3313        struct dpaa_priv *priv = NULL;
3314        struct fm_port_fqs port_fqs;
3315        struct mac_device *mac_dev;
3316        int err = 0, channel;
3317        struct device *dev;
3318
3319        dev = &pdev->dev;
3320
3321        err = bman_is_probed();
3322        if (!err)
3323                return -EPROBE_DEFER;
3324        if (err < 0) {
3325                dev_err(dev, "failing probe due to bman probe error\n");
3326                return -ENODEV;
3327        }
3328        err = qman_is_probed();
3329        if (!err)
3330                return -EPROBE_DEFER;
3331        if (err < 0) {
3332                dev_err(dev, "failing probe due to qman probe error\n");
3333                return -ENODEV;
3334        }
3335        err = bman_portals_probed();
3336        if (!err)
3337                return -EPROBE_DEFER;
3338        if (err < 0) {
3339                dev_err(dev,
3340                        "failing probe due to bman portals probe error\n");
3341                return -ENODEV;
3342        }
3343        err = qman_portals_probed();
3344        if (!err)
3345                return -EPROBE_DEFER;
3346        if (err < 0) {
3347                dev_err(dev,
3348                        "failing probe due to qman portals probe error\n");
3349                return -ENODEV;
3350        }
3351
3352        /* Allocate this early, so we can store relevant information in
3353         * the private area
3354         */
3355        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
3356        if (!net_dev) {
3357                dev_err(dev, "alloc_etherdev_mq() failed\n");
3358                return -ENOMEM;
3359        }
3360
3361        /* Do this early so the netdev can be used for verbose logging during probe */
3362        SET_NETDEV_DEV(net_dev, dev->parent);
3363        dev_set_drvdata(dev, net_dev);
3364
3365        priv = netdev_priv(net_dev);
3366        priv->net_dev = net_dev;
3367
3368        priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
3369
3370        mac_dev = dpaa_mac_dev_get(pdev);
3371        if (IS_ERR(mac_dev)) {
3372                netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
3373                err = PTR_ERR(mac_dev);
3374                goto free_netdev;
3375        }
3376
3377        /* Devices used for DMA mapping */
3378        priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
3379        priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
3380        err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
3381        if (!err)
3382                err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
3383                                                   DMA_BIT_MASK(40));
3384        if (err) {
3385                netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
3386                goto free_netdev;
3387        }
3388
3389        /* If fsl_fm_max_frm is set higher than the common 1500-byte default, be
3390         * conservative and still start at 1500, letting the user explicitly set
3391         * a larger MTU (e.g. via ip or ifconfig); otherwise hosts on the same
3392         * LAN could end up with different MTUs.
3393         * If, on the other hand, fsl_fm_max_frm is below 1500, start with the
3394         * maximum allowed.
3395         */
3396        net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3397
3398        netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3399                   net_dev->mtu);
3400
3401        priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
3402        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
3403
3404        /* bp init */
3405        dpaa_bp = dpaa_bp_alloc(dev);
3406        if (IS_ERR(dpaa_bp)) {
3407                err = PTR_ERR(dpaa_bp);
3408                goto free_dpaa_bps;
3409        }
3410        /* the raw size of the buffers used for reception */
3411        dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
3412        /* avoid runtime computations by keeping the usable size here */
3413        dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
3414        dpaa_bp->priv = priv;
3415
3416        err = dpaa_bp_alloc_pool(dpaa_bp);
3417        if (err < 0)
3418                goto free_dpaa_bps;
3419        priv->dpaa_bp = dpaa_bp;
3420
3421        INIT_LIST_HEAD(&priv->dpaa_fq_list);
3422
3423        memset(&port_fqs, 0, sizeof(port_fqs));
3424
3425        err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
3426        if (err < 0) {
3427                dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
3428                goto free_dpaa_bps;
3429        }
3430
3431        priv->mac_dev = mac_dev;
3432
3433        channel = dpaa_get_channel();
3434        if (channel < 0) {
3435                dev_err(dev, "dpaa_get_channel() failed\n");
3436                err = channel;
3437                goto free_dpaa_bps;
3438        }
3439
3440        priv->channel = (u16)channel;
3441
3442        /* Walk the CPUs with affine portals and add this pool channel to each
3443         * CPU's dequeue mask.
3444         */
3445        dpaa_eth_add_channel(priv->channel, &pdev->dev);
3446
3447        dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
3448
3449        /* Create a congestion group for this netdev, with
3450         * dynamically-allocated CGR ID.
3451         * Must be executed after probing the MAC, but before
3452         * assigning the egress FQs to the CGRs.
3453         */
3454        err = dpaa_eth_cgr_init(priv);
3455        if (err < 0) {
3456                dev_err(dev, "Error initializing CGR\n");
3457                goto free_dpaa_bps;
3458        }
3459
3460        err = dpaa_ingress_cgr_init(priv);
3461        if (err < 0) {
3462                dev_err(dev, "Error initializing ingress CGR\n");
3463                goto delete_egress_cgr;
3464        }
3465
3466        /* Add the FQs to the interface, and make them active */
3467        list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
3468                err = dpaa_fq_init(dpaa_fq, false);
3469                if (err < 0)
3470                        goto free_dpaa_fqs;
3471        }
3472
3473        priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
3474        priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
3475
3476        /* All real interfaces need their ports initialized */
3477        err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
3478                                  &priv->buf_layout[0], dev);
3479        if (err)
3480                goto free_dpaa_fqs;
3481
3482        /* Rx traffic distribution based on keygen hashing defaults to on */
3483        priv->keygen_in_use = true;
3484
3485        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
3486        if (!priv->percpu_priv) {
3487                dev_err(dev, "devm_alloc_percpu() failed\n");
3488                err = -ENOMEM;
3489                goto free_dpaa_fqs;
3490        }
3491
3492        priv->num_tc = 1;
3493        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3494
3495        /* Initialize NAPI */
3496        err = dpaa_napi_add(net_dev);
3497        if (err < 0)
3498                goto delete_dpaa_napi;
3499
3500        err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3501        if (err < 0)
3502                goto delete_dpaa_napi;
3503
3504        dpaa_eth_sysfs_init(&net_dev->dev);
3505
3506        netif_info(priv, probe, net_dev, "Probed interface %s\n",
3507                   net_dev->name);
3508
3509        return 0;
3510
3511delete_dpaa_napi:
3512        dpaa_napi_del(net_dev);
3513free_dpaa_fqs:
3514        dpaa_fq_free(dev, &priv->dpaa_fq_list);
3515        qman_delete_cgr_safe(&priv->ingress_cgr);
3516        qman_release_cgrid(priv->ingress_cgr.cgrid);
3517delete_egress_cgr:
3518        qman_delete_cgr_safe(&priv->cgr_data.cgr);
3519        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3520free_dpaa_bps:
3521        dpaa_bps_free(priv);
3522free_netdev:
3523        dev_set_drvdata(dev, NULL);
3524        free_netdev(net_dev);
3525
3526        return err;
3527}
3528
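/* Tear down everything set up by dpaa_eth_probe(): unregister the netdev,
 * free the frame queues, delete and release both congestion groups, remove
 * the NAPI contexts and free the buffer pools and the netdev itself.
 */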
3529static int dpaa_remove(struct platform_device *pdev)
3530{
3531        struct net_device *net_dev;
3532        struct dpaa_priv *priv;
3533        struct device *dev;
3534        int err;
3535
3536        dev = &pdev->dev;
3537        net_dev = dev_get_drvdata(dev);
3538
3539        priv = netdev_priv(net_dev);
3540
3541        dpaa_eth_sysfs_remove(dev);
3542
3543        dev_set_drvdata(dev, NULL);
3544        unregister_netdev(net_dev);
3545
3546        err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
3547
3548        qman_delete_cgr_safe(&priv->ingress_cgr);
3549        qman_release_cgrid(priv->ingress_cgr.cgrid);
3550        qman_delete_cgr_safe(&priv->cgr_data.cgr);
3551        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3552
3553        dpaa_napi_del(net_dev);
3554
3555        dpaa_bps_free(priv);
3556
3557        free_netdev(net_dev);
3558
3559        return err;
3560}
3561
3562static const struct platform_device_id dpaa_devtype[] = {
3563        {
3564                .name = "dpaa-ethernet",
3565                .driver_data = 0,
3566        }, {
3567        }
3568};
3569MODULE_DEVICE_TABLE(platform, dpaa_devtype);
3570
3571static struct platform_driver dpaa_driver = {
3572        .driver = {
3573                .name = KBUILD_MODNAME,
3574        },
3575        .id_table = dpaa_devtype,
3576        .probe = dpaa_eth_probe,
3577        .remove = dpaa_remove
3578};
3579
3580static int __init dpaa_load(void)
3581{
3582        int err;
3583
3584        pr_debug("FSL DPAA Ethernet driver\n");
3585
3586        /* initialize the dpaa_eth mirror (cached) values of the FMan parameters */
3587        dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3588        dpaa_max_frm = fman_get_max_frm();
3589
3590        err = platform_driver_register(&dpaa_driver);
3591        if (err < 0)
3592                pr_err("Error, platform_driver_register() = %d\n", err);
3593
3594        return err;
3595}
3596module_init(dpaa_load);
3597
3598static void __exit dpaa_unload(void)
3599{
3600        platform_driver_unregister(&dpaa_driver);
3601
3602        /* Only one channel is used and needs to be released after all
3603         * interfaces are removed
3604         */
3605        dpaa_release_channel();
3606}
3607module_exit(dpaa_unload);
3608
3609MODULE_LICENSE("Dual BSD/GPL");
3610MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
3611