linux/drivers/net/ethernet/ti/cpsw_priv.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Texas Instruments Ethernet Switch Driver
   4 *
   5 * Copyright (C) 2019 Texas Instruments
   6 */
   7
   8#include <linux/bpf.h>
   9#include <linux/bpf_trace.h>
  10#include <linux/if_ether.h>
  11#include <linux/if_vlan.h>
  12#include <linux/kmemleak.h>
  13#include <linux/module.h>
  14#include <linux/netdevice.h>
  15#include <linux/net_tstamp.h>
  16#include <linux/of.h>
  17#include <linux/phy.h>
  18#include <linux/platform_device.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/skbuff.h>
  21#include <net/page_pool.h>
  22#include <net/pkt_cls.h>
  23
  24#include "cpsw.h"
  25#include "cpts.h"
  26#include "cpsw_ale.h"
  27#include "cpsw_priv.h"
  28#include "cpsw_sl.h"
  29#include "davinci_cpdma.h"
  30
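    /* Filled in at probe time by the CPSW driver front end; maps a port's
     * cpsw_priv back to its slave index for the code shared in this file.
     */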
  31int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
  32
  33void cpsw_intr_enable(struct cpsw_common *cpsw)
  34{
  35        writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
  36        writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
  37
  38        cpdma_ctlr_int_ctrl(cpsw->dma, true);
  39}
  40
  41void cpsw_intr_disable(struct cpsw_common *cpsw)
  42{
  43        writel_relaxed(0, &cpsw->wr_regs->tx_en);
  44        writel_relaxed(0, &cpsw->wr_regs->rx_en);
  45
  46        cpdma_ctlr_int_ctrl(cpsw->dma, false);
  47}
  48
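    /* TX completion callback invoked by CPDMA for every finished descriptor.
     * The token is either an sk_buff or a tagged xdp_frame handle (see
     * cpsw_is_xdpf_handle()), so both the regular TX path and XDP_TX /
     * ndo_xdp_xmit completions end up here.
     */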
  49void cpsw_tx_handler(void *token, int len, int status)
  50{
  51        struct cpsw_meta_xdp    *xmeta;
  52        struct xdp_frame        *xdpf;
  53        struct net_device       *ndev;
  54        struct netdev_queue     *txq;
  55        struct sk_buff          *skb;
  56        int                     ch;
  57
  58        if (cpsw_is_xdpf_handle(token)) {
  59                xdpf = cpsw_handle_to_xdpf(token);
  60                xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
  61                ndev = xmeta->ndev;
  62                ch = xmeta->ch;
  63                xdp_return_frame(xdpf);
  64        } else {
  65                skb = token;
  66                ndev = skb->dev;
  67                ch = skb_get_queue_mapping(skb);
  68                cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
  69                dev_kfree_skb_any(skb);
  70        }
  71
  72        /* Check whether the queue is stopped due to stalled tx dma; if so,
  73         * wake the queue now that descriptors have been freed.
  74         */
  75        txq = netdev_get_tx_queue(ndev, ch);
  76        if (unlikely(netif_tx_queue_stopped(txq)))
  77                netif_tx_wake_queue(txq);
  78
  79        ndev->stats.tx_packets++;
  80        ndev->stats.tx_bytes += len;
  81}
  82
  83irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
  84{
  85        struct cpsw_common *cpsw = dev_id;
  86
  87        writel(0, &cpsw->wr_regs->tx_en);
  88        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
  89
  90        if (cpsw->quirk_irq) {
  91                disable_irq_nosync(cpsw->irqs_table[1]);
  92                cpsw->tx_irq_disabled = true;
  93        }
  94
  95        napi_schedule(&cpsw->napi_tx);
  96        return IRQ_HANDLED;
  97}
  98
  99irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 100{
 101        struct cpsw_common *cpsw = dev_id;
 102
 103        writel(0, &cpsw->wr_regs->rx_en);
 104        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 105
 106        if (cpsw->quirk_irq) {
 107                disable_irq_nosync(cpsw->irqs_table[0]);
 108                cpsw->rx_irq_disabled = true;
 109        }
 110
 111        napi_schedule(&cpsw->napi_rx);
 112        return IRQ_HANDLED;
 113}
 114
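    /* Multi-channel tx NAPI poll: cpdma_ctrl_txchs_state() reports which tx
     * channels still have completed descriptors pending, each one is processed
     * with the per-channel budget computed by cpsw_split_res(), and the tx
     * interrupt is unmasked again only if the overall budget was not exhausted.
     */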
 115int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
 116{
 117        struct cpsw_common      *cpsw = napi_to_cpsw(napi_tx);
 118        int                     num_tx, cur_budget, ch;
 119        u32                     ch_map;
 120        struct cpsw_vector      *txv;
 121
 122        /* process every unprocessed channel */
 123        ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
 124        for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
 125                if (!(ch_map & 0x80))
 126                        continue;
 127
 128                txv = &cpsw->txv[ch];
 129                if (unlikely(txv->budget > budget - num_tx))
 130                        cur_budget = budget - num_tx;
 131                else
 132                        cur_budget = txv->budget;
 133
 134                num_tx += cpdma_chan_process(txv->ch, cur_budget);
 135                if (num_tx >= budget)
 136                        break;
 137        }
 138
 139        if (num_tx < budget) {
 140                napi_complete(napi_tx);
 141                writel(0xff, &cpsw->wr_regs->tx_en);
 142        }
 143
 144        return num_tx;
 145}
 146
 147int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 148{
 149        struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
 150        int num_tx;
 151
 152        num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
 153        if (num_tx < budget) {
 154                napi_complete(napi_tx);
 155                writel(0xff, &cpsw->wr_regs->tx_en);
 156                if (cpsw->tx_irq_disabled) {
 157                        cpsw->tx_irq_disabled = false;
 158                        enable_irq(cpsw->irqs_table[1]);
 159                }
 160        }
 161
 162        return num_tx;
 163}
 164
 165int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
 166{
 167        struct cpsw_common      *cpsw = napi_to_cpsw(napi_rx);
 168        int                     num_rx, cur_budget, ch;
 169        u32                     ch_map;
 170        struct cpsw_vector      *rxv;
 171
 172        /* process every unprocessed channel */
 173        ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
 174        for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
 175                if (!(ch_map & 0x01))
 176                        continue;
 177
 178                rxv = &cpsw->rxv[ch];
 179                if (unlikely(rxv->budget > budget - num_rx))
 180                        cur_budget = budget - num_rx;
 181                else
 182                        cur_budget = rxv->budget;
 183
 184                num_rx += cpdma_chan_process(rxv->ch, cur_budget);
 185                if (num_rx >= budget)
 186                        break;
 187        }
 188
 189        if (num_rx < budget) {
 190                napi_complete_done(napi_rx, num_rx);
 191                writel(0xff, &cpsw->wr_regs->rx_en);
 192        }
 193
 194        return num_rx;
 195}
 196
 197int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
 198{
 199        struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
 200        int num_rx;
 201
 202        num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
 203        if (num_rx < budget) {
 204                napi_complete_done(napi_rx, num_rx);
 205                writel(0xff, &cpsw->wr_regs->rx_en);
 206                if (cpsw->rx_irq_disabled) {
 207                        cpsw->rx_irq_disabled = false;
 208                        enable_irq(cpsw->irqs_table[0]);
 209                }
 210        }
 211
 212        return num_rx;
 213}
 214
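    /* The switch can prepend a 32-bit VLAN encapsulation word to received
     * frames carrying the VID, priority and a packet type field.  Pop that word
     * here and, unless the ALE untags port 0 traffic for this VLAN, convert it
     * into a software-accelerated 802.1Q tag on the skb.
     */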
 215void cpsw_rx_vlan_encap(struct sk_buff *skb)
 216{
 217        struct cpsw_priv *priv = netdev_priv(skb->dev);
 218        u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
 219        struct cpsw_common *cpsw = priv->cpsw;
 220        u16 vtag, vid, prio, pkt_type;
 221
 222        /* Remove VLAN header encapsulation word */
 223        skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
 224
 225        pkt_type = (rx_vlan_encap_hdr >>
 226                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
 227                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
 228        /* Ignore unknown & Priority-tagged packets */
 229        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
 230            pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
 231                return;
 232
 233        vid = (rx_vlan_encap_hdr >>
 234               CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
 235               VLAN_VID_MASK;
 236        /* Ignore vid 0 and pass packet as is */
 237        if (!vid)
 238                return;
 239
 240        /* Untag P0 packets if set for vlan */
 241        if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
 242                prio = (rx_vlan_encap_hdr >>
 243                        CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
 244                        CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
 245
 246                vtag = (prio << VLAN_PRIO_SHIFT) | vid;
 247                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
 248        }
 249
 250        /* strip vlan tag for VLAN-tagged packet */
 251        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
 252                memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
 253                skb_pull(skb, VLAN_HLEN);
 254        }
 255}
 256
 257void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
 258{
 259        slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
 260        slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
 261}
 262
 263void soft_reset(const char *module, void __iomem *reg)
 264{
 265        unsigned long timeout = jiffies + HZ;
 266
 267        writel_relaxed(1, reg);
 268        do {
 269                cpu_relax();
 270        } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
 271
 272        WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
 273}
 274
 275void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 276{
 277        struct cpsw_priv *priv = netdev_priv(ndev);
 278        struct cpsw_common *cpsw = priv->cpsw;
 279        int ch;
 280
 281        cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
 282        ndev->stats.tx_errors++;
 283        cpsw_intr_disable(cpsw);
 284        for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
 285                cpdma_chan_stop(cpsw->txv[ch].ch);
 286                cpdma_chan_start(cpsw->txv[ch].ch);
 287        }
 288
 289        cpsw_intr_enable(cpsw);
 290        netif_trans_update(ndev);
 291        netif_tx_wake_all_queues(ndev);
 292}
 293
 294static int cpsw_get_common_speed(struct cpsw_common *cpsw)
 295{
 296        int i, speed;
 297
 298        for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
 299                if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
 300                        speed += cpsw->slaves[i].phy->speed;
 301
 302        return speed;
 303}
 304
 305int cpsw_need_resplit(struct cpsw_common *cpsw)
 306{
 307        int i, rlim_ch_num;
 308        int speed, ch_rate;
 309
 310        /* re-split resources only if the speed has changed */
 311        speed = cpsw_get_common_speed(cpsw);
 312        if (speed == cpsw->speed || !speed)
 313                return 0;
 314
 315        cpsw->speed = speed;
 316
 317        for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
 318                ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
 319                if (!ch_rate)
 320                        break;
 321
 322                rlim_ch_num++;
 323        }
 324
 325        /* cases not dependent on speed */
 326        if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
 327                return 0;
 328
 329        return 1;
 330}
 331
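    /* Split the NAPI poll budget (CPSW_POLL_WEIGHT) and the CPDMA weights
     * across the tx channels.  Rate limited channels get a share proportional
     * to their configured rate against the aggregate link rate, channels
     * without a rate share the remaining budget equally, and any rounding
     * leftover is folded back into one channel.  Rough illustration (assuming
     * CPSW_POLL_WEIGHT of 64, a 1000Mbps link and two of four channels limited
     * to 100Mbps and 300Mbps): the limited channels end up with about 6 and 19
     * budget slots and the two unrestricted channels with 19 each.
     */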
 332void cpsw_split_res(struct cpsw_common *cpsw)
 333{
 334        u32 consumed_rate = 0, bigest_rate = 0;
 335        struct cpsw_vector *txv = cpsw->txv;
 336        int i, ch_weight, rlim_ch_num = 0;
 337        int budget, bigest_rate_ch = 0;
 338        u32 ch_rate, max_rate;
 339        int ch_budget = 0;
 340
 341        for (i = 0; i < cpsw->tx_ch_num; i++) {
 342                ch_rate = cpdma_chan_get_rate(txv[i].ch);
 343                if (!ch_rate)
 344                        continue;
 345
 346                rlim_ch_num++;
 347                consumed_rate += ch_rate;
 348        }
 349
 350        if (cpsw->tx_ch_num == rlim_ch_num) {
 351                max_rate = consumed_rate;
 352        } else if (!rlim_ch_num) {
 353                ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
 354                bigest_rate = 0;
 355                max_rate = consumed_rate;
 356        } else {
 357                max_rate = cpsw->speed * 1000;
 358
 359                /* if max_rate is less than expected due to reduced link speed,
 360                 * split proportionally according to the next potential max speed
 361                 */
 362                if (max_rate < consumed_rate)
 363                        max_rate *= 10;
 364
 365                if (max_rate < consumed_rate)
 366                        max_rate *= 10;
 367
 368                ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
 369                ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
 370                            (cpsw->tx_ch_num - rlim_ch_num);
 371                bigest_rate = (max_rate - consumed_rate) /
 372                              (cpsw->tx_ch_num - rlim_ch_num);
 373        }
 374
 375        /* split tx weight/budget */
 376        budget = CPSW_POLL_WEIGHT;
 377        for (i = 0; i < cpsw->tx_ch_num; i++) {
 378                ch_rate = cpdma_chan_get_rate(txv[i].ch);
 379                if (ch_rate) {
 380                        txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
 381                        if (!txv[i].budget)
 382                                txv[i].budget++;
 383                        if (ch_rate > bigest_rate) {
 384                                bigest_rate_ch = i;
 385                                bigest_rate = ch_rate;
 386                        }
 387
 388                        ch_weight = (ch_rate * 100) / max_rate;
 389                        if (!ch_weight)
 390                                ch_weight++;
 391                        cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
 392                } else {
 393                        txv[i].budget = ch_budget;
 394                        if (!bigest_rate_ch)
 395                                bigest_rate_ch = i;
 396                        cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
 397                }
 398
 399                budget -= txv[i].budget;
 400        }
 401
 402        if (budget)
 403                txv[bigest_rate_ch].budget += budget;
 404
 405        /* split rx budget */
 406        budget = CPSW_POLL_WEIGHT;
 407        ch_budget = budget / cpsw->rx_ch_num;
 408        for (i = 0; i < cpsw->rx_ch_num; i++) {
 409                cpsw->rxv[i].budget = ch_budget;
 410                budget -= ch_budget;
 411        }
 412
 413        if (budget)
 414                cpsw->rxv[0].budget += budget;
 415}
 416
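    /* One-time setup of the blocks shared by all slave ports: detect the IP
     * version to pick the per-version register layout, populate the per-slave
     * register pointers, then create the ALE, CPDMA and CPTS instances.
     * Called from the probing driver once the switch registers are mapped.
     */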
 417int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
 418                     int ale_ageout, phys_addr_t desc_mem_phys,
 419                     int descs_pool_size)
 420{
 421        u32 slave_offset, sliver_offset, slave_size;
 422        struct cpsw_ale_params ale_params;
 423        struct cpsw_platform_data *data;
 424        struct cpdma_params dma_params;
 425        struct device *dev = cpsw->dev;
 426        struct device_node *cpts_node;
 427        void __iomem *cpts_regs;
 428        int ret = 0, i;
 429
 430        data = &cpsw->data;
 431        cpsw->rx_ch_num = 1;
 432        cpsw->tx_ch_num = 1;
 433
 434        cpsw->version = readl(&cpsw->regs->id_ver);
 435
 436        memset(&dma_params, 0, sizeof(dma_params));
 437        memset(&ale_params, 0, sizeof(ale_params));
 438
 439        switch (cpsw->version) {
 440        case CPSW_VERSION_1:
 441                cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
 442                cpts_regs            = ss_regs + CPSW1_CPTS_OFFSET;
 443                cpsw->hw_stats       = ss_regs + CPSW1_HW_STATS;
 444                dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
 445                dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
 446                ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
 447                slave_offset         = CPSW1_SLAVE_OFFSET;
 448                slave_size           = CPSW1_SLAVE_SIZE;
 449                sliver_offset        = CPSW1_SLIVER_OFFSET;
 450                dma_params.desc_mem_phys = 0;
 451                break;
 452        case CPSW_VERSION_2:
 453        case CPSW_VERSION_3:
 454        case CPSW_VERSION_4:
 455                cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
 456                cpts_regs            = ss_regs + CPSW2_CPTS_OFFSET;
 457                cpsw->hw_stats       = ss_regs + CPSW2_HW_STATS;
 458                dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
 459                dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
 460                ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
 461                slave_offset         = CPSW2_SLAVE_OFFSET;
 462                slave_size           = CPSW2_SLAVE_SIZE;
 463                sliver_offset        = CPSW2_SLIVER_OFFSET;
 464                dma_params.desc_mem_phys = desc_mem_phys;
 465                break;
 466        default:
 467                dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
 468                return -ENODEV;
 469        }
 470
 471        for (i = 0; i < cpsw->data.slaves; i++) {
 472                struct cpsw_slave *slave = &cpsw->slaves[i];
 473                void __iomem            *regs = cpsw->regs;
 474
 475                slave->slave_num = i;
 476                slave->data     = &cpsw->data.slave_data[i];
 477                slave->regs     = regs + slave_offset;
 478                slave->port_vlan = slave->data->dual_emac_res_vlan;
 479                slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
 480                if (IS_ERR(slave->mac_sl))
 481                        return PTR_ERR(slave->mac_sl);
 482
 483                slave_offset  += slave_size;
 484                sliver_offset += SLIVER_SIZE;
 485        }
 486
 487        ale_params.dev                  = dev;
 488        ale_params.ale_ageout           = ale_ageout;
 489        ale_params.ale_entries          = data->ale_entries;
 490        ale_params.ale_ports            = CPSW_ALE_PORTS_NUM;
 491
 492        cpsw->ale = cpsw_ale_create(&ale_params);
 493        if (IS_ERR(cpsw->ale)) {
 494                dev_err(dev, "error initializing ale engine\n");
 495                return PTR_ERR(cpsw->ale);
 496        }
 497
 498        dma_params.dev          = dev;
 499        dma_params.rxthresh     = dma_params.dmaregs + CPDMA_RXTHRESH;
 500        dma_params.rxfree       = dma_params.dmaregs + CPDMA_RXFREE;
 501        dma_params.rxhdp        = dma_params.txhdp + CPDMA_RXHDP;
 502        dma_params.txcp         = dma_params.txhdp + CPDMA_TXCP;
 503        dma_params.rxcp         = dma_params.txhdp + CPDMA_RXCP;
 504
 505        dma_params.num_chan             = data->channels;
 506        dma_params.has_soft_reset       = true;
 507        dma_params.min_packet_size      = CPSW_MIN_PACKET_SIZE;
 508        dma_params.desc_mem_size        = data->bd_ram_size;
 509        dma_params.desc_align           = 16;
 510        dma_params.has_ext_regs         = true;
 511        dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
 512        dma_params.bus_freq_mhz         = cpsw->bus_freq_mhz;
 513        dma_params.descs_pool_size      = descs_pool_size;
 514
 515        cpsw->dma = cpdma_ctlr_create(&dma_params);
 516        if (!cpsw->dma) {
 517                dev_err(dev, "error initializing dma\n");
 518                return -ENOMEM;
 519        }
 520
 521        cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
 522        if (!cpts_node)
 523                cpts_node = cpsw->dev->of_node;
 524
 525        cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
 526        if (IS_ERR(cpsw->cpts)) {
 527                ret = PTR_ERR(cpsw->cpts);
 528                cpdma_ctlr_destroy(cpsw->dma);
 529        }
 530        of_node_put(cpts_node);
 531
 532        return ret;
 533}
 534
 535#if IS_ENABLED(CONFIG_TI_CPTS)
 536
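    /* CPTS timestamping is configured per slave port.  CPSW v1 uses the
     * dedicated CPSW1_TS_CTL/CPSW1_TS_SEQ_LTYPE registers, while v2/v3 pack the
     * enable bits into CPSW2_CONTROL and the message type mask plus sequence id
     * offset (30 bytes into the PTP header) into CPSW2_TS_SEQ_MTYPE.  Only PTP
     * event messages (EVENT_MSG_BITS) are timestamped.
     */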
 537static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 538{
 539        struct cpsw_common *cpsw = priv->cpsw;
 540        struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 541        u32 ts_en, seq_id;
 542
 543        if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
 544                slave_write(slave, 0, CPSW1_TS_CTL);
 545                return;
 546        }
 547
 548        seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
 549        ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
 550
 551        if (priv->tx_ts_enabled)
 552                ts_en |= CPSW_V1_TS_TX_EN;
 553
 554        if (priv->rx_ts_enabled)
 555                ts_en |= CPSW_V1_TS_RX_EN;
 556
 557        slave_write(slave, ts_en, CPSW1_TS_CTL);
 558        slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
 559}
 560
 561static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 562{
 563        struct cpsw_common *cpsw = priv->cpsw;
 564        struct cpsw_slave *slave;
 565        u32 ctrl, mtype;
 566
 567        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 568
 569        ctrl = slave_read(slave, CPSW2_CONTROL);
 570        switch (cpsw->version) {
 571        case CPSW_VERSION_2:
 572                ctrl &= ~CTRL_V2_ALL_TS_MASK;
 573
 574                if (priv->tx_ts_enabled)
 575                        ctrl |= CTRL_V2_TX_TS_BITS;
 576
 577                if (priv->rx_ts_enabled)
 578                        ctrl |= CTRL_V2_RX_TS_BITS;
 579                break;
 580        case CPSW_VERSION_3:
 581        default:
 582                ctrl &= ~CTRL_V3_ALL_TS_MASK;
 583
 584                if (priv->tx_ts_enabled)
 585                        ctrl |= CTRL_V3_TX_TS_BITS;
 586
 587                if (priv->rx_ts_enabled)
 588                        ctrl |= CTRL_V3_RX_TS_BITS;
 589                break;
 590        }
 591
 592        mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
 593
 594        slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
 595        slave_write(slave, ctrl, CPSW2_CONTROL);
 596        writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
 597        writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
 598}
 599
 600static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 601{
 602        struct cpsw_priv *priv = netdev_priv(dev);
 603        struct cpsw_common *cpsw = priv->cpsw;
 604        struct hwtstamp_config cfg;
 605
 606        if (cpsw->version != CPSW_VERSION_1 &&
 607            cpsw->version != CPSW_VERSION_2 &&
 608            cpsw->version != CPSW_VERSION_3)
 609                return -EOPNOTSUPP;
 610
 611        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 612                return -EFAULT;
 613
 614        /* reserved for future extensions */
 615        if (cfg.flags)
 616                return -EINVAL;
 617
 618        if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
 619                return -ERANGE;
 620
 621        switch (cfg.rx_filter) {
 622        case HWTSTAMP_FILTER_NONE:
 623                priv->rx_ts_enabled = 0;
 624                break;
 625        case HWTSTAMP_FILTER_ALL:
 626        case HWTSTAMP_FILTER_NTP_ALL:
 627                return -ERANGE;
 628        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 629        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 630        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 631                priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 632                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 633                break;
 634        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 635        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 636        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 637        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 638        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 639        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 640        case HWTSTAMP_FILTER_PTP_V2_EVENT:
 641        case HWTSTAMP_FILTER_PTP_V2_SYNC:
 642        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 643                priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
 644                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 645                break;
 646        default:
 647                return -ERANGE;
 648        }
 649
 650        priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
 651
 652        switch (cpsw->version) {
 653        case CPSW_VERSION_1:
 654                cpsw_hwtstamp_v1(priv);
 655                break;
 656        case CPSW_VERSION_2:
 657        case CPSW_VERSION_3:
 658                cpsw_hwtstamp_v2(priv);
 659                break;
 660        default:
 661                WARN_ON(1);
 662        }
 663
 664        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 665}
 666
 667static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 668{
 669        struct cpsw_common *cpsw = ndev_to_cpsw(dev);
 670        struct cpsw_priv *priv = netdev_priv(dev);
 671        struct hwtstamp_config cfg;
 672
 673        if (cpsw->version != CPSW_VERSION_1 &&
 674            cpsw->version != CPSW_VERSION_2 &&
 675            cpsw->version != CPSW_VERSION_3)
 676                return -EOPNOTSUPP;
 677
 678        cfg.flags = 0;
 679        cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 680        cfg.rx_filter = priv->rx_ts_enabled;
 681
 682        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 683}
 684#else
 685static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 686{
 687        return -EOPNOTSUPP;
 688}
 689
 690static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 691{
 692        return -EOPNOTSUPP;
 693}
 694#endif /*CONFIG_TI_CPTS*/
 695
 696int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 697{
 698        struct cpsw_priv *priv = netdev_priv(dev);
 699        struct cpsw_common *cpsw = priv->cpsw;
 700        int slave_no = cpsw_slave_index(cpsw, priv);
 701
 702        if (!netif_running(dev))
 703                return -EINVAL;
 704
 705        switch (cmd) {
 706        case SIOCSHWTSTAMP:
 707                return cpsw_hwtstamp_set(dev, req);
 708        case SIOCGHWTSTAMP:
 709                return cpsw_hwtstamp_get(dev, req);
 710        }
 711
 712        if (!cpsw->slaves[slave_no].phy)
 713                return -EOPNOTSUPP;
 714        return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
 715}
 716
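    /* .ndo_set_tx_maxrate handler: "rate" arrives in Mbps from the stack while
     * the CPDMA channel rate is kept in kbit/s, hence the * 1000 conversion
     * below.  Once the shaper is reprogrammed, the NAPI budgets are re-split to
     * match the new rates.
     */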
 717int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
 718{
 719        struct cpsw_priv *priv = netdev_priv(ndev);
 720        struct cpsw_common *cpsw = priv->cpsw;
 721        struct cpsw_slave *slave;
 722        u32 min_rate;
 723        u32 ch_rate;
 724        int i, ret;
 725
 726        ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
 727        if (ch_rate == rate)
 728                return 0;
 729
 730        ch_rate = rate * 1000;
 731        min_rate = cpdma_chan_get_min_rate(cpsw->dma);
 732        if (ch_rate < min_rate && ch_rate) {
 733                dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
 734                        min_rate);
 735                return -EINVAL;
 736        }
 737
 738        if (rate > cpsw->speed) {
 739                dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
 740                return -EINVAL;
 741        }
 742
 743        ret = pm_runtime_get_sync(cpsw->dev);
 744        if (ret < 0) {
 745                pm_runtime_put_noidle(cpsw->dev);
 746                return ret;
 747        }
 748
 749        ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
 750        pm_runtime_put(cpsw->dev);
 751
 752        if (ret)
 753                return ret;
 754
 755        /* update rates for slaves tx queues */
 756        for (i = 0; i < cpsw->data.slaves; i++) {
 757                slave = &cpsw->slaves[i];
 758                if (!slave->ndev)
 759                        continue;
 760
 761                netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
 762        }
 763
 764        cpsw_split_res(cpsw);
 765        return ret;
 766}
 767
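    /* Map a traffic class to a tx FIFO shaper.  The last (lowest priority) tc
     * goes to FIFO 0, which cannot be shaped; the remaining classes map to the
     * shapeable FIFOs in reverse order, e.g. with num_tc == 3 (and the usual
     * CPSW_FIFO_SHAPERS_NUM of 3): tc0 -> FIFO 3, tc1 -> FIFO 2, tc2 -> FIFO 0.
     */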
 768static int cpsw_tc_to_fifo(int tc, int num_tc)
 769{
 770        if (tc == num_tc - 1)
 771                return 0;
 772
 773        return CPSW_FIFO_SHAPERS_NUM - tc;
 774}
 775
 776bool cpsw_shp_is_off(struct cpsw_priv *priv)
 777{
 778        struct cpsw_common *cpsw = priv->cpsw;
 779        struct cpsw_slave *slave;
 780        u32 shift, mask, val;
 781
 782        val = readl_relaxed(&cpsw->regs->ptype);
 783
 784        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 785        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
 786        mask = 7 << shift;
 787        val = val & mask;
 788
 789        return !val;
 790}
 791
 792static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
 793{
 794        struct cpsw_common *cpsw = priv->cpsw;
 795        struct cpsw_slave *slave;
 796        u32 shift, mask, val;
 797
 798        val = readl_relaxed(&cpsw->regs->ptype);
 799
 800        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 801        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
 802        mask = (1 << --fifo) << shift;
 803        val = on ? val | mask : val & ~mask;
 804
 805        writel_relaxed(val, &cpsw->regs->ptype);
 806}
 807
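    /* Program the per-FIFO send percentage for the CBS shaper.  SEND_PERCENT
     * holds one percentage field per shapeable FIFO (FIFO n's field at bit
     * offset 8 * (n - 1)), and the sum of all programmed fields must stay below
     * 100 percent of the configured link speed (priv->shp_cfg_speed).
     */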
 808static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
 809{
 810        struct cpsw_common *cpsw = priv->cpsw;
 811        u32 val = 0, send_pct, shift;
 812        struct cpsw_slave *slave;
 813        int pct = 0, i;
 814
 815        if (bw > priv->shp_cfg_speed * 1000)
 816                goto err;
 817
 818        /* shaping has to stay enabled for the highest fifos linearly,
 819         * and fifo bw can be no more than the interface allows
 820         */
 821        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 822        send_pct = slave_read(slave, SEND_PERCENT);
 823        for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
 824                if (!bw) {
 825                        if (i >= fifo || !priv->fifo_bw[i])
 826                                continue;
 827
 828                        dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
 829                        continue;
 830                }
 831
 832                if (!priv->fifo_bw[i] && i > fifo) {
 833                        dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
 834                        return -EINVAL;
 835                }
 836
 837                shift = (i - 1) * 8;
 838                if (i == fifo) {
 839                        send_pct &= ~(CPSW_PCT_MASK << shift);
 840                        val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
 841                        if (!val)
 842                                val = 1;
 843
 844                        send_pct |= val << shift;
 845                        pct += val;
 846                        continue;
 847                }
 848
 849                if (priv->fifo_bw[i])
 850                        pct += (send_pct >> shift) & CPSW_PCT_MASK;
 851        }
 852
 853        if (pct >= 100)
 854                goto err;
 855
 856        slave_write(slave, send_pct, SEND_PERCENT);
 857        priv->fifo_bw[fifo] = bw;
 858
 859        dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
 860                 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
 861
 862        return 0;
 863err:
 864        dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
 865        return -EINVAL;
 866}
 867
 868static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
 869{
 870        struct cpsw_common *cpsw = priv->cpsw;
 871        struct cpsw_slave *slave;
 872        u32 tx_in_ctl_rg, val;
 873        int ret;
 874
 875        ret = cpsw_set_fifo_bw(priv, fifo, bw);
 876        if (ret)
 877                return ret;
 878
 879        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 880        tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
 881                       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
 882
 883        if (!bw)
 884                cpsw_fifo_shp_on(priv, fifo, bw);
 885
 886        val = slave_read(slave, tx_in_ctl_rg);
 887        if (cpsw_shp_is_off(priv)) {
 888                /* disable rate limiting for all FIFO queues */
 889                val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
 890
 891                /* set type of FIFO queues to normal priority mode */
 892                val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
 893
 894                /* set type of FIFO queues to be rate limited */
 895                if (bw)
 896                        val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
 897                else
 898                        priv->shp_cfg_speed = 0;
 899        }
 900
 901        /* toggle a FIFO rate limited queue */
 902        if (bw)
 903                val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
 904        else
 905                val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
 906        slave_write(slave, val, tx_in_ctl_rg);
 907
 908        /* FIFO transmit shape enable */
 909        cpsw_fifo_shp_on(priv, fifo, bw);
 910        return 0;
 911}
 912
 913/* Defaults:
 914 * class A - prio 3
 915 * class B - prio 2
 916 * shaping for class A should be set first
 917 */
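    /* A minimal userspace sketch (interface name and credit/slope values are
     * purely illustrative, assuming an mqprio root is installed first):
     *
     *   tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
     *           map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1
     *   tc qdisc replace dev eth0 parent 100:1 cbs locredit -1470 hicredit 62 \
     *           sendslope -980000 idleslope 20000 offload 1
     *
     * With "offload 1" the stack calls back into cpsw_set_cbs() below.
     */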
 918static int cpsw_set_cbs(struct net_device *ndev,
 919                        struct tc_cbs_qopt_offload *qopt)
 920{
 921        struct cpsw_priv *priv = netdev_priv(ndev);
 922        struct cpsw_common *cpsw = priv->cpsw;
 923        struct cpsw_slave *slave;
 924        int prev_speed = 0;
 925        int tc, ret, fifo;
 926        u32 bw = 0;
 927
 928        tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
 929
 930        /* enable channels in backward order, as the highest FIFOs must be rate
 931         * limited first and for compliance with CPDMA rate limited channels,
 932         * which are also used in backward order. FIFO0 cannot be rate limited.
 933         */
 934        fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
 935        if (!fifo) {
 936                dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
 937                return -EINVAL;
 938        }
 939
 940        /* do nothing, it's disabled anyway */
 941        if (!qopt->enable && !priv->fifo_bw[fifo])
 942                return 0;
 943
 944        /* shapers can be set if link speed is known */
 945        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
 946        if (slave->phy && slave->phy->link) {
 947                if (priv->shp_cfg_speed &&
 948                    priv->shp_cfg_speed != slave->phy->speed)
 949                        prev_speed = priv->shp_cfg_speed;
 950
 951                priv->shp_cfg_speed = slave->phy->speed;
 952        }
 953
 954        if (!priv->shp_cfg_speed) {
 955                dev_err(priv->dev, "Link speed is not known");
 956                return -1;
 957        }
 958
 959        ret = pm_runtime_get_sync(cpsw->dev);
 960        if (ret < 0) {
 961                pm_runtime_put_noidle(cpsw->dev);
 962                return ret;
 963        }
 964
 965        bw = qopt->enable ? qopt->idleslope : 0;
 966        ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
 967        if (ret) {
 968                priv->shp_cfg_speed = prev_speed;
 969                prev_speed = 0;
 970        }
 971
 972        if (bw && prev_speed)
 973                dev_warn(priv->dev,
 974                         "Speed was changed, CBS shaper speeds are changed!");
 975
 976        pm_runtime_put_sync(cpsw->dev);
 977        return ret;
 978}
 979
 980static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
 981{
 982        struct tc_mqprio_qopt_offload *mqprio = type_data;
 983        struct cpsw_priv *priv = netdev_priv(ndev);
 984        struct cpsw_common *cpsw = priv->cpsw;
 985        int fifo, num_tc, count, offset;
 986        struct cpsw_slave *slave;
 987        u32 tx_prio_map = 0;
 988        int i, tc, ret;
 989
 990        num_tc = mqprio->qopt.num_tc;
 991        if (num_tc > CPSW_TC_NUM)
 992                return -EINVAL;
 993
 994        if (mqprio->mode != TC_MQPRIO_MODE_DCB)
 995                return -EINVAL;
 996
 997        ret = pm_runtime_get_sync(cpsw->dev);
 998        if (ret < 0) {
 999                pm_runtime_put_noidle(cpsw->dev);
1000                return ret;
1001        }
1002
1003        if (num_tc) {
1004                for (i = 0; i < 8; i++) {
1005                        tc = mqprio->qopt.prio_tc_map[i];
1006                        fifo = cpsw_tc_to_fifo(tc, num_tc);
1007                        tx_prio_map |= fifo << (4 * i);
1008                }
1009
1010                netdev_set_num_tc(ndev, num_tc);
1011                for (i = 0; i < num_tc; i++) {
1012                        count = mqprio->qopt.count[i];
1013                        offset = mqprio->qopt.offset[i];
1014                        netdev_set_tc_queue(ndev, i, count, offset);
1015                }
1016        }
1017
1018        if (!mqprio->qopt.hw) {
1019                /* restore default configuration */
1020                netdev_reset_tc(ndev);
1021                tx_prio_map = TX_PRIORITY_MAPPING;
1022        }
1023
1024        priv->mqprio_hw = mqprio->qopt.hw;
1025
1026        offset = cpsw->version == CPSW_VERSION_1 ?
1027                 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1028
1029        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1030        slave_write(slave, tx_prio_map, offset);
1031
1032        pm_runtime_put_sync(cpsw->dev);
1033
1034        return 0;
1035}
1036
1037int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1038                      void *type_data)
1039{
1040        switch (type) {
1041        case TC_SETUP_QDISC_CBS:
1042                return cpsw_set_cbs(ndev, type_data);
1043
1044        case TC_SETUP_QDISC_MQPRIO:
1045                return cpsw_set_mqprio(ndev, type_data);
1046
1047        default:
1048                return -EOPNOTSUPP;
1049        }
1050}
1051
1052void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1053{
1054        int fifo, bw;
1055
1056        for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1057                bw = priv->fifo_bw[fifo];
1058                if (!bw)
1059                        continue;
1060
1061                cpsw_set_fifo_rlimit(priv, fifo, bw);
1062        }
1063}
1064
1065void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1066{
1067        struct cpsw_common *cpsw = priv->cpsw;
1068        u32 tx_prio_map = 0;
1069        int i, tc, fifo;
1070        u32 tx_prio_rg;
1071
1072        if (!priv->mqprio_hw)
1073                return;
1074
1075        for (i = 0; i < 8; i++) {
1076                tc = netdev_get_prio_tc_map(priv->ndev, i);
1077                fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1078                tx_prio_map |= fifo << (4 * i);
1079        }
1080
1081        tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1082                     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1083
1084        slave_write(slave, tx_prio_map, tx_prio_rg);
1085}
1086
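    /* Prime every rx channel with page_pool backed buffers.  Each page carries
     * the frame data at CPSW_HEADROOM and a small struct cpsw_meta_xdp at
     * CPSW_XMETA_OFFSET recording the owning ndev and channel, so the
     * completion and XDP paths can find their way back without extra lookups.
     */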
1087int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1088{
1089        struct cpsw_common *cpsw = priv->cpsw;
1090        struct cpsw_meta_xdp *xmeta;
1091        struct page_pool *pool;
1092        struct page *page;
1093        int ch_buf_num;
1094        int ch, i, ret;
1095        dma_addr_t dma;
1096
1097        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1098                pool = cpsw->page_pool[ch];
1099                ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1100                for (i = 0; i < ch_buf_num; i++) {
1101                        page = page_pool_dev_alloc_pages(pool);
1102                        if (!page) {
1103                                cpsw_err(priv, ifup, "allocate rx page err\n");
1104                                return -ENOMEM;
1105                        }
1106
1107                        xmeta = page_address(page) + CPSW_XMETA_OFFSET;
1108                        xmeta->ndev = priv->ndev;
1109                        xmeta->ch = ch;
1110
1111                        dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
1112                        ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
1113                                                            page, dma,
1114                                                            cpsw->rx_packet_max,
1115                                                            0);
1116                        if (ret < 0) {
1117                                cpsw_err(priv, ifup,
1118                                         "cannot submit page to channel %d rx, error %d\n",
1119                                         ch, ret);
1120                                page_pool_recycle_direct(pool, page);
1121                                return ret;
1122                        }
1123                }
1124
1125                cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1126                          ch, ch_buf_num);
1127        }
1128
1129        return 0;
1130}
1131
1132static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
1133                                               int size)
1134{
1135        struct page_pool_params pp_params;
1136        struct page_pool *pool;
1137
1138        pp_params.order = 0;
1139        pp_params.flags = PP_FLAG_DMA_MAP;
1140        pp_params.pool_size = size;
1141        pp_params.nid = NUMA_NO_NODE;
1142        pp_params.dma_dir = DMA_BIDIRECTIONAL;
1143        pp_params.dev = cpsw->dev;
1144
1145        pool = page_pool_create(&pp_params);
1146        if (IS_ERR(pool))
1147                dev_err(cpsw->dev, "cannot create rx page pool\n");
1148
1149        return pool;
1150}
1151
1152static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
1153{
1154        struct page_pool *pool;
1155        int ret = 0, pool_size;
1156
1157        pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1158        pool = cpsw_create_page_pool(cpsw, pool_size);
1159        if (IS_ERR(pool))
1160                ret = PTR_ERR(pool);
1161        else
1162                cpsw->page_pool[ch] = pool;
1163
1164        return ret;
1165}
1166
1167static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
1168{
1169        struct cpsw_common *cpsw = priv->cpsw;
1170        struct xdp_rxq_info *rxq;
1171        struct page_pool *pool;
1172        int ret;
1173
1174        pool = cpsw->page_pool[ch];
1175        rxq = &priv->xdp_rxq[ch];
1176
1177        ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
1178        if (ret)
1179                return ret;
1180
1181        ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
1182        if (ret)
1183                xdp_rxq_info_unreg(rxq);
1184
1185        return ret;
1186}
1187
1188static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
1189{
1190        struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
1191
1192        if (!xdp_rxq_info_is_reg(rxq))
1193                return;
1194
1195        xdp_rxq_info_unreg(rxq);
1196}
1197
1198void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
1199{
1200        struct net_device *ndev;
1201        int i, ch;
1202
1203        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1204                for (i = 0; i < cpsw->data.slaves; i++) {
1205                        ndev = cpsw->slaves[i].ndev;
1206                        if (!ndev)
1207                                continue;
1208
1209                        cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
1210                }
1211
1212                page_pool_destroy(cpsw->page_pool[ch]);
1213                cpsw->page_pool[ch] = NULL;
1214        }
1215}
1216
1217int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
1218{
1219        struct net_device *ndev;
1220        int i, ch, ret;
1221
1222        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1223                ret = cpsw_create_rx_pool(cpsw, ch);
1224                if (ret)
1225                        goto err_cleanup;
1226
1227                /* using the same page pool is allowed as rx handlers never run
1228                 * simultaneously for both ndevs
1229                 */
1230                for (i = 0; i < cpsw->data.slaves; i++) {
1231                        ndev = cpsw->slaves[i].ndev;
1232                        if (!ndev)
1233                                continue;
1234
1235                        ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
1236                        if (ret)
1237                                goto err_cleanup;
1238                }
1239        }
1240
1241        return 0;
1242
1243err_cleanup:
1244        cpsw_destroy_xdp_rxqs(cpsw);
1245
1246        return ret;
1247}
1248
1249static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
1250{
1251        struct bpf_prog *prog = bpf->prog;
1252
1253        if (!priv->xdpi.prog && !prog)
1254                return 0;
1255
1256        if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
1257                return -EBUSY;
1258
1259        WRITE_ONCE(priv->xdp_prog, prog);
1260
1261        xdp_attachment_setup(&priv->xdpi, bpf);
1262
1263        return 0;
1264}
1265
1266int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1267{
1268        struct cpsw_priv *priv = netdev_priv(ndev);
1269
1270        switch (bpf->command) {
1271        case XDP_SETUP_PROG:
1272                return cpsw_xdp_prog_setup(priv, bpf);
1273
1274        case XDP_QUERY_PROG:
1275                return xdp_attachment_query(&priv->xdpi, bpf);
1276
1277        default:
1278                return -EINVAL;
1279        }
1280}
1281
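    /* Transmit one xdp_frame on tx channel 0.  Frames that still live in one
     * of our page_pool pages (XDP_TX) are submitted with their existing DMA
     * mapping; frames coming from elsewhere (ndo_xdp_xmit) are handed to CPDMA
     * for mapping and need enough headroom to hold the cpsw_meta_xdp.
     */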
1282int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
1283                      struct page *page, int port)
1284{
1285        struct cpsw_common *cpsw = priv->cpsw;
1286        struct cpsw_meta_xdp *xmeta;
1287        struct cpdma_chan *txch;
1288        dma_addr_t dma;
1289        int ret;
1290
1291        xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
1292        xmeta->ndev = priv->ndev;
1293        xmeta->ch = 0;
1294        txch = cpsw->txv[0].ch;
1295
1296        if (page) {
1297                dma = page_pool_get_dma_addr(page);
1298                dma += xdpf->headroom + sizeof(struct xdp_frame);
1299                ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
1300                                               dma, xdpf->len, port);
1301        } else {
1302                if (sizeof(*xmeta) > xdpf->headroom) {
1303                        xdp_return_frame_rx_napi(xdpf);
1304                        return -EINVAL;
1305                }
1306
1307                ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
1308                                        xdpf->data, xdpf->len, port);
1309        }
1310
1311        if (ret) {
1312                priv->ndev->stats.tx_dropped++;
1313                xdp_return_frame_rx_napi(xdpf);
1314        }
1315
1316        return ret;
1317}
1318
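    /* Run the attached XDP program on one received buffer.  Returns
     * CPSW_XDP_PASS when the caller should keep processing the packet as a
     * regular skb, or CPSW_XDP_CONSUMED when it was transmitted, redirected or
     * dropped (in the drop cases the page is recycled into the channel's pool
     * here).
     */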
1319int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
1320                 struct page *page, int port)
1321{
1322        struct cpsw_common *cpsw = priv->cpsw;
1323        struct net_device *ndev = priv->ndev;
1324        int ret = CPSW_XDP_CONSUMED;
1325        struct xdp_frame *xdpf;
1326        struct bpf_prog *prog;
1327        u32 act;
1328
1329        rcu_read_lock();
1330
1331        prog = READ_ONCE(priv->xdp_prog);
1332        if (!prog) {
1333                ret = CPSW_XDP_PASS;
1334                goto out;
1335        }
1336
1337        act = bpf_prog_run_xdp(prog, xdp);
1338        switch (act) {
1339        case XDP_PASS:
1340                ret = CPSW_XDP_PASS;
1341                break;
1342        case XDP_TX:
1343                xdpf = convert_to_xdp_frame(xdp);
1344                if (unlikely(!xdpf))
1345                        goto drop;
1346
1347                cpsw_xdp_tx_frame(priv, xdpf, page, port);
1348                break;
1349        case XDP_REDIRECT:
1350                if (xdp_do_redirect(ndev, xdp, prog))
1351                        goto drop;
1352
1353                /*  Have to flush here, per packet, instead of doing it in bulk
1354                 *  at the end of the napi handler. The RX devices on this
1355                 *  particular hardware share a common queue, so the
1356                 *  incoming device might change per packet.
1357                 */
1358                xdp_do_flush_map();
1359                break;
1360        default:
1361                bpf_warn_invalid_xdp_action(act);
1362                /* fall through */
1363        case XDP_ABORTED:
1364                trace_xdp_exception(ndev, prog, act);
1365                /* fall through -- handle aborts by dropping packet */
1366        case XDP_DROP:
1367                goto drop;
1368        }
1369out:
1370        rcu_read_unlock();
1371        return ret;
1372drop:
1373        rcu_read_unlock();
1374        page_pool_recycle_direct(cpsw->page_pool[ch], page);
1375        return ret;
1376}
1377