linux/drivers/net/ethernet/freescale/enetc/enetc.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/* Copyright 2017-2019 NXP */
   3
   4#include "enetc.h"
   5#include <linux/bpf_trace.h>
   6#include <linux/tcp.h>
   7#include <linux/udp.h>
   8#include <linux/vmalloc.h>
   9#include <linux/ptp_classify.h>
  10#include <net/pkt_sched.h>
  11
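/* When an XDP program is attached to any RX ring, one TX ring per possible
 * CPU is reserved for XDP_TX/XDP_REDIRECT, so fewer TX rings remain for the
 * network stack.
 */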
  12static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
  13{
  14        int num_tx_rings = priv->num_tx_rings;
  15        int i;
  16
  17        for (i = 0; i < priv->num_rx_rings; i++)
  18                if (priv->rx_ring[i]->xdp.prog)
  19                        return num_tx_rings - num_possible_cpus();
  20
  21        return num_tx_rings;
  22}
  23
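/* XDP TX rings are paired 1:1 with RX rings, so derive the RX ring paired
 * with a given XDP TX ring from its position in the xdp_tx_ring array.
 */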
  24static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
  25                                                        struct enetc_bdr *tx_ring)
  26{
  27        int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
  28
  29        return priv->rx_ring[index];
  30}
  31
  32static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
  33{
  34        if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
  35                return NULL;
  36
  37        return tx_swbd->skb;
  38}
  39
  40static struct xdp_frame *
  41enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
  42{
  43        if (tx_swbd->is_xdp_redirect)
  44                return tx_swbd->xdp_frame;
  45
  46        return NULL;
  47}
  48
  49static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
  50                                struct enetc_tx_swbd *tx_swbd)
  51{
   52        /* For XDP_TX, pages come from RX, whereas for the other contexts where
   53         * is_dma_page is set, they come from skb_frag_dma_map. The unmap length
   54         * must match the original mapping, so we need to tell these cases apart.
   55         */
  56        if (tx_swbd->is_dma_page)
  57                dma_unmap_page(tx_ring->dev, tx_swbd->dma,
  58                               tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
  59                               tx_swbd->dir);
  60        else
  61                dma_unmap_single(tx_ring->dev, tx_swbd->dma,
  62                                 tx_swbd->len, tx_swbd->dir);
  63        tx_swbd->dma = 0;
  64}
  65
  66static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
  67                                struct enetc_tx_swbd *tx_swbd)
  68{
  69        struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
  70        struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
  71
  72        if (tx_swbd->dma)
  73                enetc_unmap_tx_buff(tx_ring, tx_swbd);
  74
  75        if (xdp_frame) {
  76                xdp_return_frame(tx_swbd->xdp_frame);
  77                tx_swbd->xdp_frame = NULL;
  78        } else if (skb) {
  79                dev_kfree_skb_any(skb);
  80                tx_swbd->skb = NULL;
  81        }
  82}
  83
  84/* Let H/W know BD ring has been updated */
  85static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
  86{
  87        /* includes wmb() */
  88        enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
  89}
  90
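/* Parse a PTP packet: report whether it is UDP encapsulated, its message
 * type, whether it uses two-step timestamping, and the offsets of the
 * correctionField and of the message body relative to the MAC header.
 */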
  91static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
  92                           u8 *msgtype, u8 *twostep,
  93                           u16 *correction_offset, u16 *body_offset)
  94{
  95        unsigned int ptp_class;
  96        struct ptp_header *hdr;
  97        unsigned int type;
  98        u8 *base;
  99
 100        ptp_class = ptp_classify_raw(skb);
 101        if (ptp_class == PTP_CLASS_NONE)
 102                return -EINVAL;
 103
 104        hdr = ptp_parse_header(skb, ptp_class);
 105        if (!hdr)
 106                return -EINVAL;
 107
 108        type = ptp_class & PTP_CLASS_PMASK;
 109        if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
 110                *udp = 1;
 111        else
 112                *udp = 0;
 113
 114        *msgtype = ptp_get_msgtype(hdr, ptp_class);
 115        *twostep = hdr->flag_field[0] & 0x2;
 116
 117        base = skb_mac_header(skb);
 118        *correction_offset = (u8 *)&hdr->correction - base;
 119        *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
 120
 121        return 0;
 122}
 123
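/* Map the skb head and its fragments to TX BDs, adding an extension BD for
 * VLAN insertion and/or PTP timestamping when needed. Returns the number of
 * BDs consumed, or 0 on DMA mapping error (after undoing prior mappings).
 */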
 124static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
 125{
 126        bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
 127        struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
 128        struct enetc_hw *hw = &priv->si->hw;
 129        struct enetc_tx_swbd *tx_swbd;
 130        int len = skb_headlen(skb);
 131        union enetc_tx_bd temp_bd;
 132        u8 msgtype, twostep, udp;
 133        union enetc_tx_bd *txbd;
 134        u16 offset1, offset2;
 135        int i, count = 0;
 136        skb_frag_t *frag;
 137        unsigned int f;
 138        dma_addr_t dma;
 139        u8 flags = 0;
 140
 141        i = tx_ring->next_to_use;
 142        txbd = ENETC_TXBD(*tx_ring, i);
 143        prefetchw(txbd);
 144
 145        dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
 146        if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
 147                goto dma_err;
 148
 149        temp_bd.addr = cpu_to_le64(dma);
 150        temp_bd.buf_len = cpu_to_le16(len);
 151        temp_bd.lstatus = 0;
 152
 153        tx_swbd = &tx_ring->tx_swbd[i];
 154        tx_swbd->dma = dma;
 155        tx_swbd->len = len;
 156        tx_swbd->is_dma_page = 0;
 157        tx_swbd->dir = DMA_TO_DEVICE;
 158        count++;
 159
 160        do_vlan = skb_vlan_tag_present(skb);
 161        if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
 162                if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
 163                                    &offset2) ||
 164                    msgtype != PTP_MSGTYPE_SYNC || twostep)
 165                        WARN_ONCE(1, "Bad packet for one-step timestamping\n");
 166                else
 167                        do_onestep_tstamp = true;
 168        } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
 169                do_twostep_tstamp = true;
 170        }
 171
 172        tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
 173        tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
 174
 175        if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
 176                flags |= ENETC_TXBD_FLAGS_EX;
 177
 178        if (tx_ring->tsd_enable)
 179                flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
 180
 181        /* first BD needs frm_len and offload flags set */
 182        temp_bd.frm_len = cpu_to_le16(skb->len);
 183        temp_bd.flags = flags;
 184
 185        if (flags & ENETC_TXBD_FLAGS_TSE)
 186                temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
 187                                                          flags);
 188
 189        if (flags & ENETC_TXBD_FLAGS_EX) {
 190                u8 e_flags = 0;
 191                *txbd = temp_bd;
 192                enetc_clear_tx_bd(&temp_bd);
 193
 194                /* add extension BD for VLAN and/or timestamping */
 195                flags = 0;
 196                tx_swbd++;
 197                txbd++;
 198                i++;
 199                if (unlikely(i == tx_ring->bd_count)) {
 200                        i = 0;
 201                        tx_swbd = tx_ring->tx_swbd;
 202                        txbd = ENETC_TXBD(*tx_ring, 0);
 203                }
 204                prefetchw(txbd);
 205
 206                if (do_vlan) {
 207                        temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
 208                        temp_bd.ext.tpid = 0; /* < C-TAG */
 209                        e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
 210                }
 211
 212                if (do_onestep_tstamp) {
 213                        u32 lo, hi, val;
 214                        u64 sec, nsec;
 215                        u8 *data;
 216
 217                        lo = enetc_rd_hot(hw, ENETC_SICTR0);
 218                        hi = enetc_rd_hot(hw, ENETC_SICTR1);
 219                        sec = (u64)hi << 32 | lo;
 220                        nsec = do_div(sec, 1000000000);
 221
 222                        /* Configure extension BD */
 223                        temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
 224                        e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
 225
 226                        /* Update originTimestamp field of Sync packet
 227                         * - 48 bits seconds field
  228                         * - 32 bits nanoseconds field
 229                         */
 230                        data = skb_mac_header(skb);
 231                        *(__be16 *)(data + offset2) =
 232                                htons((sec >> 32) & 0xffff);
 233                        *(__be32 *)(data + offset2 + 2) =
 234                                htonl(sec & 0xffffffff);
 235                        *(__be32 *)(data + offset2 + 6) = htonl(nsec);
 236
 237                        /* Configure single-step register */
 238                        val = ENETC_PM0_SINGLE_STEP_EN;
 239                        val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
 240                        if (udp)
 241                                val |= ENETC_PM0_SINGLE_STEP_CH;
 242
 243                        enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
 244                        enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
 245                } else if (do_twostep_tstamp) {
 246                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 247                        e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
 248                }
 249
 250                temp_bd.ext.e_flags = e_flags;
 251                count++;
 252        }
 253
 254        frag = &skb_shinfo(skb)->frags[0];
 255        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
 256                len = skb_frag_size(frag);
 257                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
 258                                       DMA_TO_DEVICE);
 259                if (dma_mapping_error(tx_ring->dev, dma))
 260                        goto dma_err;
 261
 262                *txbd = temp_bd;
 263                enetc_clear_tx_bd(&temp_bd);
 264
 265                flags = 0;
 266                tx_swbd++;
 267                txbd++;
 268                i++;
 269                if (unlikely(i == tx_ring->bd_count)) {
 270                        i = 0;
 271                        tx_swbd = tx_ring->tx_swbd;
 272                        txbd = ENETC_TXBD(*tx_ring, 0);
 273                }
 274                prefetchw(txbd);
 275
 276                temp_bd.addr = cpu_to_le64(dma);
 277                temp_bd.buf_len = cpu_to_le16(len);
 278
 279                tx_swbd->dma = dma;
 280                tx_swbd->len = len;
 281                tx_swbd->is_dma_page = 1;
 282                tx_swbd->dir = DMA_TO_DEVICE;
 283                count++;
 284        }
 285
 286        /* last BD needs 'F' bit set */
 287        flags |= ENETC_TXBD_FLAGS_F;
 288        temp_bd.flags = flags;
 289        *txbd = temp_bd;
 290
 291        tx_ring->tx_swbd[i].is_eof = true;
 292        tx_ring->tx_swbd[i].skb = skb;
 293
 294        enetc_bdr_idx_inc(tx_ring, &i);
 295        tx_ring->next_to_use = i;
 296
 297        skb_tx_timestamp(skb);
 298
 299        enetc_update_tx_ring_tail(tx_ring);
 300
 301        return count;
 302
 303dma_err:
 304        dev_err(tx_ring->dev, "DMA map error");
 305
 306        do {
 307                tx_swbd = &tx_ring->tx_swbd[i];
 308                enetc_free_tx_frame(tx_ring, tx_swbd);
 309                if (i == 0)
 310                        i = tx_ring->bd_count;
 311                i--;
 312        } while (count--);
 313
 314        return 0;
 315}
 316
 317static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
 318                                    struct net_device *ndev)
 319{
 320        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 321        struct enetc_bdr *tx_ring;
 322        int count;
 323
 324        /* Queue one-step Sync packet if already locked */
 325        if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
 326                if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
 327                                          &priv->flags)) {
 328                        skb_queue_tail(&priv->tx_skbs, skb);
 329                        return NETDEV_TX_OK;
 330                }
 331        }
 332
 333        tx_ring = priv->tx_ring[skb->queue_mapping];
 334
 335        if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
 336                if (unlikely(skb_linearize(skb)))
 337                        goto drop_packet_err;
 338
 339        count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
 340        if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
 341                netif_stop_subqueue(ndev, tx_ring->index);
 342                return NETDEV_TX_BUSY;
 343        }
 344
 345        enetc_lock_mdio();
 346        count = enetc_map_tx_buffs(tx_ring, skb);
 347        enetc_unlock_mdio();
 348
 349        if (unlikely(!count))
 350                goto drop_packet_err;
 351
 352        if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
 353                netif_stop_subqueue(ndev, tx_ring->index);
 354
 355        return NETDEV_TX_OK;
 356
 357drop_packet_err:
 358        dev_kfree_skb_any(skb);
 359        return NETDEV_TX_OK;
 360}
 361
 362netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
 363{
 364        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 365        u8 udp, msgtype, twostep;
 366        u16 offset1, offset2;
 367
  368        /* Mark the TX timestamp type in skb->cb[0] if required */
 369        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
 370            (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
 371                skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
 372        } else {
 373                skb->cb[0] = 0;
 374        }
 375
 376        /* Fall back to two-step timestamp if not one-step Sync packet */
 377        if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
 378                if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
 379                                    &offset1, &offset2) ||
 380                    msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
 381                        skb->cb[0] = ENETC_F_TX_TSTAMP;
 382        }
 383
 384        return enetc_start_xmit(skb, ndev);
 385}
 386
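/* MSI-X handler: disable the vector's RX and TX ring interrupts and defer
 * the actual processing to NAPI.
 */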
 387static irqreturn_t enetc_msix(int irq, void *data)
 388{
 389        struct enetc_int_vector *v = data;
 390        int i;
 391
 392        enetc_lock_mdio();
 393
 394        /* disable interrupts */
 395        enetc_wr_reg_hot(v->rbier, 0);
 396        enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
 397
 398        for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
 399                enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
 400
 401        enetc_unlock_mdio();
 402
 403        napi_schedule(&v->napi);
 404
 405        return IRQ_HANDLED;
 406}
 407
 408static void enetc_rx_dim_work(struct work_struct *w)
 409{
 410        struct dim *dim = container_of(w, struct dim, work);
 411        struct dim_cq_moder moder =
 412                net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 413        struct enetc_int_vector *v =
 414                container_of(dim, struct enetc_int_vector, rx_dim);
 415
 416        v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
 417        dim->state = DIM_START_MEASURE;
 418}
 419
 420static void enetc_rx_net_dim(struct enetc_int_vector *v)
 421{
 422        struct dim_sample dim_sample;
 423
 424        v->comp_cnt++;
 425
 426        if (!v->rx_napi_work)
 427                return;
 428
 429        dim_update_sample(v->comp_cnt,
 430                          v->rx_ring.stats.packets,
 431                          v->rx_ring.stats.bytes,
 432                          &dim_sample);
 433        net_dim(&v->rx_dim, dim_sample);
 434}
 435
 436static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
 437{
 438        int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
 439
 440        return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
 441}
 442
 443static bool enetc_page_reusable(struct page *page)
 444{
 445        return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
 446}
 447
 448static void enetc_reuse_page(struct enetc_bdr *rx_ring,
 449                             struct enetc_rx_swbd *old)
 450{
 451        struct enetc_rx_swbd *new;
 452
 453        new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
 454
 455        /* next buf that may reuse a page */
 456        enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
 457
 458        /* copy page reference */
 459        *new = *old;
 460}
 461
 462static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
 463                                u64 *tstamp)
 464{
 465        u32 lo, hi, tstamp_lo;
 466
 467        lo = enetc_rd_hot(hw, ENETC_SICTR0);
 468        hi = enetc_rd_hot(hw, ENETC_SICTR1);
 469        tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
 470        if (lo <= tstamp_lo)
 471                hi -= 1;
 472        *tstamp = (u64)hi << 32 | tstamp_lo;
 473}
 474
 475static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
 476{
 477        struct skb_shared_hwtstamps shhwtstamps;
 478
 479        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
 480                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 481                shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
 482                skb_txtime_consumed(skb);
 483                skb_tstamp_tx(skb, &shhwtstamps);
 484        }
 485}
 486
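/* Give an XDP_TX page back to the RX ring it came from, or unmap and free it
 * if that ring has no free software BD left to hold it.
 */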
 487static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
 488                                      struct enetc_tx_swbd *tx_swbd)
 489{
 490        struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
 491        struct enetc_rx_swbd rx_swbd = {
 492                .dma = tx_swbd->dma,
 493                .page = tx_swbd->page,
 494                .page_offset = tx_swbd->page_offset,
 495                .dir = tx_swbd->dir,
 496                .len = tx_swbd->len,
 497        };
 498        struct enetc_bdr *rx_ring;
 499
 500        rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
 501
 502        if (likely(enetc_swbd_unused(rx_ring))) {
 503                enetc_reuse_page(rx_ring, &rx_swbd);
 504
 505                /* sync for use by the device */
 506                dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
 507                                                 rx_swbd.page_offset,
 508                                                 ENETC_RXB_DMA_SIZE_XDP,
 509                                                 rx_swbd.dir);
 510
 511                rx_ring->stats.recycles++;
 512        } else {
 513                /* RX ring is already full, we need to unmap and free the
 514                 * page, since there's nothing useful we can do with it.
 515                 */
 516                rx_ring->stats.recycle_failures++;
 517
 518                dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
 519                               rx_swbd.dir);
 520                __free_page(rx_swbd.page);
 521        }
 522
 523        rx_ring->xdp.xdp_tx_in_flight--;
 524}
 525
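/* Reclaim completed TX BDs: recycle XDP_TX buffers, return redirected XDP
 * frames, deliver TX timestamps and free transmitted skbs. Returns true if
 * the TX work budget was not exhausted.
 */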
 526static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
 527{
 528        struct net_device *ndev = tx_ring->ndev;
 529        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 530        int tx_frm_cnt = 0, tx_byte_cnt = 0;
 531        struct enetc_tx_swbd *tx_swbd;
 532        int i, bds_to_clean;
 533        bool do_twostep_tstamp;
 534        u64 tstamp = 0;
 535
 536        i = tx_ring->next_to_clean;
 537        tx_swbd = &tx_ring->tx_swbd[i];
 538
 539        bds_to_clean = enetc_bd_ready_count(tx_ring, i);
 540
 541        do_twostep_tstamp = false;
 542
 543        while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
 544                struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
 545                struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
 546                bool is_eof = tx_swbd->is_eof;
 547
 548                if (unlikely(tx_swbd->check_wb)) {
 549                        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 550                        union enetc_tx_bd *txbd;
 551
 552                        txbd = ENETC_TXBD(*tx_ring, i);
 553
 554                        if (txbd->flags & ENETC_TXBD_FLAGS_W &&
 555                            tx_swbd->do_twostep_tstamp) {
 556                                enetc_get_tx_tstamp(&priv->si->hw, txbd,
 557                                                    &tstamp);
 558                                do_twostep_tstamp = true;
 559                        }
 560                }
 561
 562                if (tx_swbd->is_xdp_tx)
 563                        enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
 564                else if (likely(tx_swbd->dma))
 565                        enetc_unmap_tx_buff(tx_ring, tx_swbd);
 566
 567                if (xdp_frame) {
 568                        xdp_return_frame(xdp_frame);
 569                } else if (skb) {
 570                        if (unlikely(tx_swbd->skb->cb[0] &
 571                                     ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
  572                                /* Start work to release the lock for the next
  573                                 * one-step timestamping packet, and send one
  574                                 * skb from the tx_skbs queue if there is one.
  575                                 */
 576                                schedule_work(&priv->tx_onestep_tstamp);
 577                        } else if (unlikely(do_twostep_tstamp)) {
 578                                enetc_tstamp_tx(skb, tstamp);
 579                                do_twostep_tstamp = false;
 580                        }
 581                        napi_consume_skb(skb, napi_budget);
 582                }
 583
 584                tx_byte_cnt += tx_swbd->len;
 585                /* Scrub the swbd here so we don't have to do that
 586                 * when we reuse it during xmit
 587                 */
 588                memset(tx_swbd, 0, sizeof(*tx_swbd));
 589
 590                bds_to_clean--;
 591                tx_swbd++;
 592                i++;
 593                if (unlikely(i == tx_ring->bd_count)) {
 594                        i = 0;
 595                        tx_swbd = tx_ring->tx_swbd;
 596                }
 597
 598                /* BD iteration loop end */
 599                if (is_eof) {
 600                        tx_frm_cnt++;
 601                        /* re-arm interrupt source */
 602                        enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
 603                                         BIT(16 + tx_ring->index));
 604                }
 605
 606                if (unlikely(!bds_to_clean))
 607                        bds_to_clean = enetc_bd_ready_count(tx_ring, i);
 608        }
 609
 610        tx_ring->next_to_clean = i;
 611        tx_ring->stats.packets += tx_frm_cnt;
 612        tx_ring->stats.bytes += tx_byte_cnt;
 613
 614        if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
 615                     __netif_subqueue_stopped(ndev, tx_ring->index) &&
 616                     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
 617                netif_wake_subqueue(ndev, tx_ring->index);
 618        }
 619
 620        return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
 621}
 622
 623static bool enetc_new_page(struct enetc_bdr *rx_ring,
 624                           struct enetc_rx_swbd *rx_swbd)
 625{
 626        bool xdp = !!(rx_ring->xdp.prog);
 627        struct page *page;
 628        dma_addr_t addr;
 629
 630        page = dev_alloc_page();
 631        if (unlikely(!page))
 632                return false;
 633
 634        /* For XDP_TX, we forgo dma_unmap -> dma_map */
 635        rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 636
 637        addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
 638        if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
 639                __free_page(page);
 640
 641                return false;
 642        }
 643
 644        rx_swbd->dma = addr;
 645        rx_swbd->page = page;
 646        rx_swbd->page_offset = rx_ring->buffer_offset;
 647
 648        return true;
 649}
 650
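/* Allocate pages for up to buff_cnt RX software BDs that lack one, program
 * their DMA addresses into the RX BDs and advance the ring's consumer index.
 * Returns the number of buffers actually made available.
 */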
 651static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
 652{
 653        struct enetc_rx_swbd *rx_swbd;
 654        union enetc_rx_bd *rxbd;
 655        int i, j;
 656
 657        i = rx_ring->next_to_use;
 658        rx_swbd = &rx_ring->rx_swbd[i];
 659        rxbd = enetc_rxbd(rx_ring, i);
 660
 661        for (j = 0; j < buff_cnt; j++) {
 662                /* try reuse page */
 663                if (unlikely(!rx_swbd->page)) {
 664                        if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
 665                                rx_ring->stats.rx_alloc_errs++;
 666                                break;
 667                        }
 668                }
 669
 670                /* update RxBD */
 671                rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
 672                                           rx_swbd->page_offset);
  673                /* clear 'R' as well */
 674                rxbd->r.lstatus = 0;
 675
 676                enetc_rxbd_next(rx_ring, &rxbd, &i);
 677                rx_swbd = &rx_ring->rx_swbd[i];
 678        }
 679
 680        if (likely(j)) {
  681                rx_ring->next_to_alloc = i; /* keep track for page reuse */
 682                rx_ring->next_to_use = i;
 683
 684                /* update ENETC's consumer index */
 685                enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
 686        }
 687
 688        return j;
 689}
 690
 691#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
 692static void enetc_get_rx_tstamp(struct net_device *ndev,
 693                                union enetc_rx_bd *rxbd,
 694                                struct sk_buff *skb)
 695{
 696        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 697        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 698        struct enetc_hw *hw = &priv->si->hw;
 699        u32 lo, hi, tstamp_lo;
 700        u64 tstamp;
 701
 702        if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
 703                lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
 704                hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
 705                rxbd = enetc_rxbd_ext(rxbd);
 706                tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
 707                if (lo <= tstamp_lo)
 708                        hi -= 1;
 709
 710                tstamp = (u64)hi << 32 | tstamp_lo;
 711                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 712                shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
 713        }
 714}
 715#endif
 716
 717static void enetc_get_offloads(struct enetc_bdr *rx_ring,
 718                               union enetc_rx_bd *rxbd, struct sk_buff *skb)
 719{
 720        struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
 721
 722        /* TODO: hashing */
 723        if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
 724                u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
 725
 726                skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
 727                skb->ip_summed = CHECKSUM_COMPLETE;
 728        }
 729
 730        if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
 731                __be16 tpid = 0;
 732
 733                switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
 734                case 0:
 735                        tpid = htons(ETH_P_8021Q);
 736                        break;
 737                case 1:
 738                        tpid = htons(ETH_P_8021AD);
 739                        break;
 740                case 2:
 741                        tpid = htons(enetc_port_rd(&priv->si->hw,
 742                                                   ENETC_PCVLANR1));
 743                        break;
 744                case 3:
 745                        tpid = htons(enetc_port_rd(&priv->si->hw,
 746                                                   ENETC_PCVLANR2));
 747                        break;
 748                default:
 749                        break;
 750                }
 751
 752                __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
 753        }
 754
 755#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
 756        if (priv->active_offloads & ENETC_F_RX_TSTAMP)
 757                enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
 758#endif
 759}
 760
 761/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
 762 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
 763 * mapped buffers.
 764 */
 765static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
 766                                               int i, u16 size)
 767{
 768        struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
 769
 770        dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
 771                                      rx_swbd->page_offset,
 772                                      size, rx_swbd->dir);
 773        return rx_swbd;
 774}
 775
 776/* Reuse the current page without performing half-page buffer flipping */
 777static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
 778                              struct enetc_rx_swbd *rx_swbd)
 779{
 780        size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
 781
 782        enetc_reuse_page(rx_ring, rx_swbd);
 783
 784        dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
 785                                         rx_swbd->page_offset,
 786                                         buffer_size, rx_swbd->dir);
 787
 788        rx_swbd->page = NULL;
 789}
 790
 791/* Reuse the current page by performing half-page buffer flipping */
 792static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
 793                               struct enetc_rx_swbd *rx_swbd)
 794{
 795        if (likely(enetc_page_reusable(rx_swbd->page))) {
 796                rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
 797                page_ref_inc(rx_swbd->page);
 798
 799                enetc_put_rx_buff(rx_ring, rx_swbd);
 800        } else {
 801                dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
 802                               rx_swbd->dir);
 803                rx_swbd->page = NULL;
 804        }
 805}
 806
 807static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
 808                                                int i, u16 size)
 809{
 810        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
 811        struct sk_buff *skb;
 812        void *ba;
 813
 814        ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
 815        skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
 816        if (unlikely(!skb)) {
 817                rx_ring->stats.rx_alloc_errs++;
 818                return NULL;
 819        }
 820
 821        skb_reserve(skb, rx_ring->buffer_offset);
 822        __skb_put(skb, size);
 823
 824        enetc_flip_rx_buff(rx_ring, rx_swbd);
 825
 826        return skb;
 827}
 828
 829static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
 830                                     u16 size, struct sk_buff *skb)
 831{
 832        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
 833
 834        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
 835                        rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
 836
 837        enetc_flip_rx_buff(rx_ring, rx_swbd);
 838}
 839
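/* If the BD signals an error, consume and recycle all buffers of the frame it
 * starts and bump the error counters, returning true so the caller does not
 * process it further.
 */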
 840static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
 841                                              u32 bd_status,
 842                                              union enetc_rx_bd **rxbd, int *i)
 843{
 844        if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
 845                return false;
 846
 847        enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
 848        enetc_rxbd_next(rx_ring, rxbd, i);
 849
 850        while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
 851                dma_rmb();
 852                bd_status = le32_to_cpu((*rxbd)->r.lstatus);
 853
 854                enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
 855                enetc_rxbd_next(rx_ring, rxbd, i);
 856        }
 857
 858        rx_ring->ndev->stats.rx_dropped++;
 859        rx_ring->ndev->stats.rx_errors++;
 860
 861        return true;
 862}
 863
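/* Build an skb for one received frame, which may span several RX BDs: the
 * first buffer becomes the skb head and the remaining ones are attached as
 * page fragments, with RX offloads taken from the first BD.
 */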
 864static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
 865                                       u32 bd_status, union enetc_rx_bd **rxbd,
 866                                       int *i, int *cleaned_cnt, int buffer_size)
 867{
 868        struct sk_buff *skb;
 869        u16 size;
 870
 871        size = le16_to_cpu((*rxbd)->r.buf_len);
 872        skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
 873        if (!skb)
 874                return NULL;
 875
 876        enetc_get_offloads(rx_ring, *rxbd, skb);
 877
 878        (*cleaned_cnt)++;
 879
 880        enetc_rxbd_next(rx_ring, rxbd, i);
 881
 882        /* not last BD in frame? */
 883        while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
 884                bd_status = le32_to_cpu((*rxbd)->r.lstatus);
 885                size = buffer_size;
 886
 887                if (bd_status & ENETC_RXBD_LSTATUS_F) {
 888                        dma_rmb();
 889                        size = le16_to_cpu((*rxbd)->r.buf_len);
 890                }
 891
 892                enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
 893
 894                (*cleaned_cnt)++;
 895
 896                enetc_rxbd_next(rx_ring, rxbd, i);
 897        }
 898
 899        skb_record_rx_queue(skb, rx_ring->index);
 900        skb->protocol = eth_type_trans(skb, rx_ring->ndev);
 901
 902        return skb;
 903}
 904
 905#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
 906
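/* Non-XDP RX poll loop: refill the ring in bundles, drop frames with errors
 * and hand each completed frame to GRO, up to work_limit frames per call.
 */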
 907static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 908                               struct napi_struct *napi, int work_limit)
 909{
 910        int rx_frm_cnt = 0, rx_byte_cnt = 0;
 911        int cleaned_cnt, i;
 912
 913        cleaned_cnt = enetc_bd_unused(rx_ring);
 914        /* next descriptor to process */
 915        i = rx_ring->next_to_clean;
 916
 917        while (likely(rx_frm_cnt < work_limit)) {
 918                union enetc_rx_bd *rxbd;
 919                struct sk_buff *skb;
 920                u32 bd_status;
 921
 922                if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
 923                        cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
 924                                                            cleaned_cnt);
 925
 926                rxbd = enetc_rxbd(rx_ring, i);
 927                bd_status = le32_to_cpu(rxbd->r.lstatus);
 928                if (!bd_status)
 929                        break;
 930
 931                enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
 932                dma_rmb(); /* for reading other rxbd fields */
 933
 934                if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
 935                                                      &rxbd, &i))
 936                        break;
 937
 938                skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
 939                                      &cleaned_cnt, ENETC_RXB_DMA_SIZE);
 940                if (!skb)
 941                        break;
 942
 943                rx_byte_cnt += skb->len;
 944                rx_frm_cnt++;
 945
 946                napi_gro_receive(napi, skb);
 947        }
 948
 949        rx_ring->next_to_clean = i;
 950
 951        rx_ring->stats.packets += rx_frm_cnt;
 952        rx_ring->stats.bytes += rx_byte_cnt;
 953
 954        return rx_frm_cnt;
 955}
 956
 957static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
 958                                  struct enetc_tx_swbd *tx_swbd,
 959                                  int frm_len)
 960{
 961        union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
 962
 963        prefetchw(txbd);
 964
 965        enetc_clear_tx_bd(txbd);
 966        txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
 967        txbd->buf_len = cpu_to_le16(tx_swbd->len);
 968        txbd->frm_len = cpu_to_le16(frm_len);
 969
 970        memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
 971}
 972
  973/* Put one XDP frame, already mapped as an array of TX software buffer
  974 * descriptors, into the TX ring.
  975 */
 976static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
 977                         struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
 978{
 979        struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
 980        int i, k, frm_len = tmp_tx_swbd->len;
 981
 982        if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
 983                return false;
 984
 985        while (unlikely(!tmp_tx_swbd->is_eof)) {
 986                tmp_tx_swbd++;
 987                frm_len += tmp_tx_swbd->len;
 988        }
 989
 990        i = tx_ring->next_to_use;
 991
 992        for (k = 0; k < num_tx_swbd; k++) {
 993                struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
 994
 995                enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
 996
 997                /* last BD needs 'F' bit set */
 998                if (xdp_tx_swbd->is_eof) {
 999                        union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1000
1001                        txbd->flags = ENETC_TXBD_FLAGS_F;
1002                }
1003
1004                enetc_bdr_idx_inc(tx_ring, &i);
1005        }
1006
1007        tx_ring->next_to_use = i;
1008
1009        return true;
1010}
1011
1012static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1013                                          struct enetc_tx_swbd *xdp_tx_arr,
1014                                          struct xdp_frame *xdp_frame)
1015{
1016        struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1017        struct skb_shared_info *shinfo;
1018        void *data = xdp_frame->data;
1019        int len = xdp_frame->len;
1020        skb_frag_t *frag;
1021        dma_addr_t dma;
1022        unsigned int f;
1023        int n = 0;
1024
1025        dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1026        if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1027                netdev_err(tx_ring->ndev, "DMA map error\n");
1028                return -1;
1029        }
1030
1031        xdp_tx_swbd->dma = dma;
1032        xdp_tx_swbd->dir = DMA_TO_DEVICE;
1033        xdp_tx_swbd->len = len;
1034        xdp_tx_swbd->is_xdp_redirect = true;
1035        xdp_tx_swbd->is_eof = false;
1036        xdp_tx_swbd->xdp_frame = NULL;
1037
1038        n++;
1039        xdp_tx_swbd = &xdp_tx_arr[n];
1040
1041        shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1042
1043        for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1044             f++, frag++) {
1045                data = skb_frag_address(frag);
1046                len = skb_frag_size(frag);
1047
1048                dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1049                if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1050                        /* Undo the DMA mapping for all fragments */
1051                        while (--n >= 0)
1052                                enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1053
1054                        netdev_err(tx_ring->ndev, "DMA map error\n");
1055                        return -1;
1056                }
1057
1058                xdp_tx_swbd->dma = dma;
1059                xdp_tx_swbd->dir = DMA_TO_DEVICE;
1060                xdp_tx_swbd->len = len;
1061                xdp_tx_swbd->is_xdp_redirect = true;
1062                xdp_tx_swbd->is_eof = false;
1063                xdp_tx_swbd->xdp_frame = NULL;
1064
1065                n++;
1066                xdp_tx_swbd = &xdp_tx_arr[n];
1067        }
1068
1069        xdp_tx_arr[n - 1].is_eof = true;
1070        xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
1071
1072        return n;
1073}
1074
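/* Transmit XDP frames redirected to this device: map each frame (head plus
 * any fragments) to software BDs and enqueue it on the XDP TX ring of the
 * current CPU. The tail pointer is bumped on XDP_XMIT_FLUSH or when a frame
 * could not be enqueued.
 */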
1075int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1076                   struct xdp_frame **frames, u32 flags)
1077{
1078        struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1079        struct enetc_ndev_priv *priv = netdev_priv(ndev);
1080        struct enetc_bdr *tx_ring;
1081        int xdp_tx_bd_cnt, i, k;
1082        int xdp_tx_frm_cnt = 0;
1083
1084        enetc_lock_mdio();
1085
1086        tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1087
1088        prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1089
1090        for (k = 0; k < num_frames; k++) {
1091                xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1092                                                               xdp_redirect_arr,
1093                                                               frames[k]);
1094                if (unlikely(xdp_tx_bd_cnt < 0))
1095                        break;
1096
1097                if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1098                                           xdp_tx_bd_cnt))) {
1099                        for (i = 0; i < xdp_tx_bd_cnt; i++)
1100                                enetc_unmap_tx_buff(tx_ring,
1101                                                    &xdp_redirect_arr[i]);
1102                        tx_ring->stats.xdp_tx_drops++;
1103                        break;
1104                }
1105
1106                xdp_tx_frm_cnt++;
1107        }
1108
1109        if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1110                enetc_update_tx_ring_tail(tx_ring);
1111
1112        tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1113
1114        enetc_unlock_mdio();
1115
1116        return xdp_tx_frm_cnt;
1117}
1118
1119static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1120                                     struct xdp_buff *xdp_buff, u16 size)
1121{
1122        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1123        void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1124        struct skb_shared_info *shinfo;
1125
1126        /* To be used for XDP_TX */
1127        rx_swbd->len = size;
1128
1129        xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1130                         rx_ring->buffer_offset, size, false);
1131
1132        shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1133        shinfo->nr_frags = 0;
1134}
1135
1136static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1137                                     u16 size, struct xdp_buff *xdp_buff)
1138{
1139        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1140        struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1141        skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
1142
1143        /* To be used for XDP_TX */
1144        rx_swbd->len = size;
1145
1146        skb_frag_off_set(frag, rx_swbd->page_offset);
1147        skb_frag_size_set(frag, size);
1148        __skb_frag_set_page(frag, rx_swbd->page);
1149
1150        shinfo->nr_frags++;
1151}
1152
1153static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1154                                 union enetc_rx_bd **rxbd, int *i,
1155                                 int *cleaned_cnt, struct xdp_buff *xdp_buff)
1156{
1157        u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1158
1159        xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1160
1161        enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1162        (*cleaned_cnt)++;
1163        enetc_rxbd_next(rx_ring, rxbd, i);
1164
1165        /* not last BD in frame? */
1166        while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1167                bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1168                size = ENETC_RXB_DMA_SIZE_XDP;
1169
1170                if (bd_status & ENETC_RXBD_LSTATUS_F) {
1171                        dma_rmb();
1172                        size = le16_to_cpu((*rxbd)->r.buf_len);
1173                }
1174
1175                enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1176                (*cleaned_cnt)++;
1177                enetc_rxbd_next(rx_ring, rxbd, i);
1178        }
1179}
1180
1181/* Convert RX buffer descriptors to TX buffer descriptors. These will be
1182 * recycled back into the RX ring in enetc_clean_tx_ring.
1183 */
1184static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1185                                        struct enetc_bdr *rx_ring,
1186                                        int rx_ring_first, int rx_ring_last)
1187{
1188        int n = 0;
1189
1190        for (; rx_ring_first != rx_ring_last;
1191             n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1192                struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1193                struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1194
1195                /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1196                tx_swbd->dma = rx_swbd->dma;
1197                tx_swbd->dir = rx_swbd->dir;
1198                tx_swbd->page = rx_swbd->page;
1199                tx_swbd->page_offset = rx_swbd->page_offset;
1200                tx_swbd->len = rx_swbd->len;
1201                tx_swbd->is_dma_page = true;
1202                tx_swbd->is_xdp_tx = true;
1203                tx_swbd->is_eof = false;
1204        }
1205
1206        /* We rely on caller providing an rx_ring_last > rx_ring_first */
1207        xdp_tx_arr[n - 1].is_eof = true;
1208
1209        return n;
1210}
1211
1212static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1213                           int rx_ring_last)
1214{
1215        while (rx_ring_first != rx_ring_last) {
1216                enetc_put_rx_buff(rx_ring,
1217                                  &rx_ring->rx_swbd[rx_ring_first]);
1218                enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1219        }
1220        rx_ring->stats.xdp_drops++;
1221}
1222
1223static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
1224                           int rx_ring_last)
1225{
1226        while (rx_ring_first != rx_ring_last) {
1227                struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1228
1229                if (rx_swbd->page) {
1230                        dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1231                                       rx_swbd->dir);
1232                        __free_page(rx_swbd->page);
1233                        rx_swbd->page = NULL;
1234                }
1235                enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1236        }
1237        rx_ring->stats.xdp_redirect_failures++;
1238}
1239
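/* XDP RX poll loop: build an xdp_buff per frame, run the BPF program and act
 * on its verdict: recycle the buffers on DROP/ABORTED, build an skb on PASS,
 * enqueue the buffers on the paired TX ring for XDP_TX, and hand
 * single-buffer frames to xdp_do_redirect() for XDP_REDIRECT.
 */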
1240static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1241                                   struct napi_struct *napi, int work_limit,
1242                                   struct bpf_prog *prog)
1243{
1244        int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1245        struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1246        struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1247        int rx_frm_cnt = 0, rx_byte_cnt = 0;
1248        struct enetc_bdr *tx_ring;
1249        int cleaned_cnt, i;
1250        u32 xdp_act;
1251
1252        cleaned_cnt = enetc_bd_unused(rx_ring);
1253        /* next descriptor to process */
1254        i = rx_ring->next_to_clean;
1255
1256        while (likely(rx_frm_cnt < work_limit)) {
1257                union enetc_rx_bd *rxbd, *orig_rxbd;
1258                int orig_i, orig_cleaned_cnt;
1259                struct xdp_buff xdp_buff;
1260                struct sk_buff *skb;
1261                int tmp_orig_i, err;
1262                u32 bd_status;
1263
1264                rxbd = enetc_rxbd(rx_ring, i);
1265                bd_status = le32_to_cpu(rxbd->r.lstatus);
1266                if (!bd_status)
1267                        break;
1268
1269                enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1270                dma_rmb(); /* for reading other rxbd fields */
1271
1272                if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1273                                                      &rxbd, &i))
1274                        break;
1275
1276                orig_rxbd = rxbd;
1277                orig_cleaned_cnt = cleaned_cnt;
1278                orig_i = i;
1279
1280                enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1281                                     &cleaned_cnt, &xdp_buff);
1282
1283                xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
1284
1285                switch (xdp_act) {
1286                default:
1287                        bpf_warn_invalid_xdp_action(xdp_act);
1288                        fallthrough;
1289                case XDP_ABORTED:
1290                        trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
1291                        fallthrough;
1292                case XDP_DROP:
1293                        enetc_xdp_drop(rx_ring, orig_i, i);
1294                        break;
1295                case XDP_PASS:
1296                        rxbd = orig_rxbd;
1297                        cleaned_cnt = orig_cleaned_cnt;
1298                        i = orig_i;
1299
1300                        skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
1301                                              &i, &cleaned_cnt,
1302                                              ENETC_RXB_DMA_SIZE_XDP);
1303                        if (unlikely(!skb))
1304                                goto out;
1305
1306                        napi_gro_receive(napi, skb);
1307                        break;
1308                case XDP_TX:
1309                        tx_ring = priv->xdp_tx_ring[rx_ring->index];
1310                        xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
1311                                                                     rx_ring,
1312                                                                     orig_i, i);
1313
1314                        if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
1315                                enetc_xdp_drop(rx_ring, orig_i, i);
1316                                tx_ring->stats.xdp_tx_drops++;
1317                        } else {
1318                                tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
1319                                rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
1320                                xdp_tx_frm_cnt++;
1321                                /* The XDP_TX enqueue was successful, so we
1322                                 * need to scrub the RX software BDs because
1323                                 * the ownership of the buffers no longer
1324                                 * belongs to the RX ring, and we must prevent
1325                                 * enetc_refill_rx_ring() from reusing
1326                                 * rx_swbd->page.
1327                                 */
1328                                while (orig_i != i) {
1329                                        rx_ring->rx_swbd[orig_i].page = NULL;
1330                                        enetc_bdr_idx_inc(rx_ring, &orig_i);
1331                                }
1332                        }
1333                        break;
1334                case XDP_REDIRECT:
1335                        /* xdp_return_frame does not support S/G in the sense
1336                         * that it leaks the fragments (__xdp_return should not
1337                         * call page_frag_free only for the initial buffer).
1338                         * Until XDP_REDIRECT gains support for S/G let's keep
1339                         * the code structure in place, but dead. We drop the
1340                         * S/G frames ourselves to avoid memory leaks which
1341                         * would otherwise leave the kernel OOM.
1342                         */
1343                        if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
1344                                enetc_xdp_drop(rx_ring, orig_i, i);
1345                                rx_ring->stats.xdp_redirect_sg++;
1346                                break;
1347                        }
1348
1349                        tmp_orig_i = orig_i;
1350
1351                        while (orig_i != i) {
1352                                enetc_flip_rx_buff(rx_ring,
1353                                                   &rx_ring->rx_swbd[orig_i]);
1354                                enetc_bdr_idx_inc(rx_ring, &orig_i);
1355                        }
1356
1357                        err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
1358                        if (unlikely(err)) {
1359                                enetc_xdp_free(rx_ring, tmp_orig_i, i);
1360                        } else {
1361                                xdp_redirect_frm_cnt++;
1362                                rx_ring->stats.xdp_redirect++;
1363                        }
1364                }
1365
1366                rx_frm_cnt++;
1367        }
1368
1369out:
1370        rx_ring->next_to_clean = i;
1371
1372        rx_ring->stats.packets += rx_frm_cnt;
1373        rx_ring->stats.bytes += rx_byte_cnt;
1374
1375        if (xdp_redirect_frm_cnt)
1376                xdp_do_flush_map();
1377
1378        if (xdp_tx_frm_cnt)
1379                enetc_update_tx_ring_tail(tx_ring);
1380
1381        if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
1382                enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
1383                                     rx_ring->xdp.xdp_tx_in_flight);
1384
1385        return rx_frm_cnt;
1386}
1387
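/* NAPI poll handler: clean the vector's TX rings, then its RX ring (with or
 * without XDP), and re-enable interrupts only once all work fit within the
 * budget.
 */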
1388static int enetc_poll(struct napi_struct *napi, int budget)
1389{
1390        struct enetc_int_vector
1391                *v = container_of(napi, struct enetc_int_vector, napi);
1392        struct enetc_bdr *rx_ring = &v->rx_ring;
1393        struct bpf_prog *prog;
1394        bool complete = true;
1395        int work_done;
1396        int i;
1397
1398        enetc_lock_mdio();
1399
1400        for (i = 0; i < v->count_tx_rings; i++)
1401                if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
1402                        complete = false;
1403
1404        prog = rx_ring->xdp.prog;
1405        if (prog)
1406                work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
1407        else
1408                work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
1409        if (work_done == budget)
1410                complete = false;
1411        if (work_done)
1412                v->rx_napi_work = true;
1413
1414        if (!complete) {
1415                enetc_unlock_mdio();
1416                return budget;
1417        }
1418
1419        napi_complete_done(napi, work_done);
1420
1421        if (likely(v->rx_dim_en))
1422                enetc_rx_net_dim(v);
1423
1424        v->rx_napi_work = false;
1425
1426        /* enable interrupts */
1427        enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
1428
1429        for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1430                enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
1431                                 ENETC_TBIER_TXTIE);
1432
1433        enetc_unlock_mdio();
1434
1435        return work_done;
1436}
1437
1438/* Probing and Init */
1439#define ENETC_MAX_RFS_SIZE 64
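/* Read the station interface capability registers to learn how many RX/TX
 * rings, flow steering entries and RSS table entries are available, and which
 * optional features (Qbv, PSFP) the hardware supports.
 */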
1440void enetc_get_si_caps(struct enetc_si *si)
1441{
1442        struct enetc_hw *hw = &si->hw;
1443        u32 val;
1444
 1445        /* find out how many of the various resources we have to work with */
1446        val = enetc_rd(hw, ENETC_SICAPR0);
1447        si->num_rx_rings = (val >> 16) & 0xff;
1448        si->num_tx_rings = val & 0xff;
1449
1450        val = enetc_rd(hw, ENETC_SIRFSCAPR);
1451        si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
1452        si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
1453
1454        si->num_rss = 0;
1455        val = enetc_rd(hw, ENETC_SIPCAPR0);
1456        if (val & ENETC_SIPCAPR0_RSS) {
1457                u32 rss;
1458
1459                rss = enetc_rd(hw, ENETC_SIRSSCAPR);
1460                si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
1461        }
1462
1463        if (val & ENETC_SIPCAPR0_QBV)
1464                si->hw_features |= ENETC_SI_F_QBV;
1465
1466        if (val & ENETC_SIPCAPR0_PSFP)
1467                si->hw_features |= ENETC_SI_F_PSFP;
1468}
1469
1470static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
1471{
1472        r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
1473                                        &r->bd_dma_base, GFP_KERNEL);
1474        if (!r->bd_base)
1475                return -ENOMEM;
1476
1477        /* h/w requires 128B alignment */
1478        if (!IS_ALIGNED(r->bd_dma_base, 128)) {
1479                dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
1480                                  r->bd_dma_base);
1481                return -EINVAL;
1482        }
1483
1484        return 0;
1485}
1486
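    /* Allocate the software bookkeeping array (tx_swbd) and the DMA-coherent
     * BD ring for one Tx ring, leaving nothing allocated on failure.
     */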
1487static int enetc_alloc_txbdr(struct enetc_bdr *txr)
1488{
1489        int err;
1490
1491        txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
1492        if (!txr->tx_swbd)
1493                return -ENOMEM;
1494
1495        err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
1496        if (err) {
1497                vfree(txr->tx_swbd);
1498                return err;
1499        }
1500
1501        txr->next_to_clean = 0;
1502        txr->next_to_use = 0;
1503
1504        return 0;
1505}
1506
1507static void enetc_free_txbdr(struct enetc_bdr *txr)
1508{
1509        int size, i;
1510
1511        for (i = 0; i < txr->bd_count; i++)
1512                enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
1513
1514        size = txr->bd_count * sizeof(union enetc_tx_bd);
1515
1516        dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
1517        txr->bd_base = NULL;
1518
1519        vfree(txr->tx_swbd);
1520        txr->tx_swbd = NULL;
1521}
1522
1523static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
1524{
1525        int i, err;
1526
1527        for (i = 0; i < priv->num_tx_rings; i++) {
1528                err = enetc_alloc_txbdr(priv->tx_ring[i]);
1529
1530                if (err)
1531                        goto fail;
1532        }
1533
1534        return 0;
1535
1536fail:
1537        while (i-- > 0)
1538                enetc_free_txbdr(priv->tx_ring[i]);
1539
1540        return err;
1541}
1542
1543static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
1544{
1545        int i;
1546
1547        for (i = 0; i < priv->num_tx_rings; i++)
1548                enetc_free_txbdr(priv->tx_ring[i]);
1549}
1550
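    /* Rx counterpart of the Tx BD ring allocation; the BD size doubles when
     * the extended BD format is used (enabled for Rx timestamping).
     */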
1551static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
1552{
1553        size_t size = sizeof(union enetc_rx_bd);
1554        int err;
1555
1556        rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
1557        if (!rxr->rx_swbd)
1558                return -ENOMEM;
1559
1560        if (extended)
1561                size *= 2;
1562
1563        err = enetc_dma_alloc_bdr(rxr, size);
1564        if (err) {
1565                vfree(rxr->rx_swbd);
1566                return err;
1567        }
1568
1569        rxr->next_to_clean = 0;
1570        rxr->next_to_use = 0;
1571        rxr->next_to_alloc = 0;
1572        rxr->ext_en = extended;
1573
1574        return 0;
1575}
1576
1577static void enetc_free_rxbdr(struct enetc_bdr *rxr)
1578{
1579        int size;
1580
1581        size = rxr->bd_count * sizeof(union enetc_rx_bd);
1582
1583        dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
1584        rxr->bd_base = NULL;
1585
1586        vfree(rxr->rx_swbd);
1587        rxr->rx_swbd = NULL;
1588}
1589
1590static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
1591{
1592        bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
1593        int i, err;
1594
1595        for (i = 0; i < priv->num_rx_rings; i++) {
1596                err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
1597
1598                if (err)
1599                        goto fail;
1600        }
1601
1602        return 0;
1603
1604fail:
1605        while (i-- > 0)
1606                enetc_free_rxbdr(priv->rx_ring[i]);
1607
1608        return err;
1609}
1610
1611static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
1612{
1613        int i;
1614
1615        for (i = 0; i < priv->num_rx_rings; i++)
1616                enetc_free_rxbdr(priv->rx_ring[i]);
1617}
1618
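    /* Discard the frames still owned by a Tx ring and reset the software
     * indexes; the BD ring memory itself stays allocated.
     */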
1619static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
1620{
1621        int i;
1622
1623        if (!tx_ring->tx_swbd)
1624                return;
1625
1626        for (i = 0; i < tx_ring->bd_count; i++) {
1627                struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
1628
1629                enetc_free_tx_frame(tx_ring, tx_swbd);
1630        }
1631
1632        tx_ring->next_to_clean = 0;
1633        tx_ring->next_to_use = 0;
1634}
1635
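    /* Unmap and release the pages still held by an Rx ring and reset the
     * software indexes; the BD ring memory itself stays allocated.
     */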
1636static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
1637{
1638        int i;
1639
1640        if (!rx_ring->rx_swbd)
1641                return;
1642
1643        for (i = 0; i < rx_ring->bd_count; i++) {
1644                struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1645
1646                if (!rx_swbd->page)
1647                        continue;
1648
1649                dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1650                               rx_swbd->dir);
1651                __free_page(rx_swbd->page);
1652                rx_swbd->page = NULL;
1653        }
1654
1655        rx_ring->next_to_clean = 0;
1656        rx_ring->next_to_use = 0;
1657        rx_ring->next_to_alloc = 0;
1658}
1659
1660static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
1661{
1662        int i;
1663
1664        for (i = 0; i < priv->num_rx_rings; i++)
1665                enetc_free_rx_ring(priv->rx_ring[i]);
1666
1667        for (i = 0; i < priv->num_tx_rings; i++)
1668                enetc_free_tx_ring(priv->tx_ring[i]);
1669}
1670
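    /* Distribute the RSS indirection table entries round-robin across the
     * Rx rings, e.g. with num_groups == 2 the table reads 0, 1, 0, 1, ...
     */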
1671static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1672{
1673        int *rss_table;
1674        int i;
1675
1676        rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1677        if (!rss_table)
1678                return -ENOMEM;
1679
1680        /* Set up RSS table defaults */
1681        for (i = 0; i < si->num_rss; i++)
1682                rss_table[i] = i % num_groups;
1683
1684        enetc_set_rss_table(si, rss_table, si->num_rss);
1685
1686        kfree(rss_table);
1687
1688        return 0;
1689}
1690
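    /* Basic SI bring-up: program the cache attributes, enable the SI and,
     * if RSS is supported, install the default indirection table.
     */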
1691int enetc_configure_si(struct enetc_ndev_priv *priv)
1692{
1693        struct enetc_si *si = priv->si;
1694        struct enetc_hw *hw = &si->hw;
1695        int err;
1696
1697        /* set SI cache attributes */
1698        enetc_wr(hw, ENETC_SICAR0,
1699                 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1700        enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1701        /* enable SI */
1702        enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1703
1704        if (si->num_rss) {
1705                err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1706                if (err)
1707                        return err;
1708        }
1709
1710        return 0;
1711}
1712
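    /* Pick the default ring layout: one Rx ring and one interrupt vector
     * per online CPU (bounded by what the SI offers), all Tx rings enabled,
     * adaptive Rx and manual Tx interrupt coalescing.
     */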
1713void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1714{
1715        struct enetc_si *si = priv->si;
1716        int cpus = num_online_cpus();
1717
1718        priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1719        priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1720
1721        /* Enable all available TX rings in order to configure as many
1722         * priorities as possible, when needed.
1723         * TODO: Make # of TX rings run-time configurable
1724         */
1725        priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1726        priv->num_tx_rings = si->num_tx_rings;
1727        priv->bdr_int_num = cpus;
1728        priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1729        priv->tx_ictt = ENETC_TXIC_TIMETHR;
1730}
1731
1732int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1733{
1734        struct enetc_si *si = priv->si;
1735
1736        priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1737                                  GFP_KERNEL);
1738        if (!priv->cls_rules)
1739                return -ENOMEM;
1740
1741        return 0;
1742}
1743
1744void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1745{
1746        kfree(priv->cls_rules);
1747}
1748
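    /* Program one Tx BD ring into hardware: base address, length, interrupt
     * coalescing defaults and the mode register. The software indexes are
     * resynchronized from the hardware producer/consumer registers, since
     * those cannot be cleared.
     */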
1749static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1750{
1751        int idx = tx_ring->index;
1752        u32 tbmr;
1753
1754        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1755                       lower_32_bits(tx_ring->bd_dma_base));
1756
1757        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1758                       upper_32_bits(tx_ring->bd_dma_base));
1759
1760        WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1761        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1762                       ENETC_RTBLENR_LEN(tx_ring->bd_count));
1763
1764        /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1765        tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1766        tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1767
1768        /* enable Tx ints by setting pkt thr to 1 */
1769        enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1770
1771        tbmr = ENETC_TBMR_EN;
1772        if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1773                tbmr |= ENETC_TBMR_VIH;
1774
1775        /* enable ring */
1776        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1777
1778        tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1779        tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1780        tx_ring->idr = hw->reg + ENETC_SITXIDR;
1781}
1782
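    /* Program one Rx BD ring into hardware. The buffer size written to the
     * hardware depends on whether an XDP program is attached, and the ring
     * is seeded with fresh buffers before being enabled.
     */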
1783static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1784{
1785        int idx = rx_ring->index;
1786        u32 rbmr;
1787
1788        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1789                       lower_32_bits(rx_ring->bd_dma_base));
1790
1791        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1792                       upper_32_bits(rx_ring->bd_dma_base));
1793
1794        WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1795        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1796                       ENETC_RTBLENR_LEN(rx_ring->bd_count));
1797
1798        if (rx_ring->xdp.prog)
1799                enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
1800        else
1801                enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1802
1803        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1804
1805        /* enable Rx ints by setting pkt thr to 1 */
1806        enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1807
1808        rbmr = ENETC_RBMR_EN;
1809
1810        if (rx_ring->ext_en)
1811                rbmr |= ENETC_RBMR_BDS;
1812
1813        if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1814                rbmr |= ENETC_RBMR_VTE;
1815
1816        rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1817        rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1818
1819        enetc_lock_mdio();
1820        enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1821        enetc_unlock_mdio();
1822
1823        /* enable ring */
1824        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1825}
1826
1827static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1828{
1829        int i;
1830
1831        for (i = 0; i < priv->num_tx_rings; i++)
1832                enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1833
1834        for (i = 0; i < priv->num_rx_rings; i++)
1835                enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1836}
1837
1838static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1839{
1840        int idx = rx_ring->index;
1841
1842        /* disable EN bit on ring */
1843        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1844}
1845
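    /* Disable a Tx BD ring and wait for it to go idle, backing off
     * exponentially (8, 16, 32, 64 ms, about 120 ms in total) and warning
     * if the full backoff interval was consumed.
     */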
1846static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1847{
1848        int delay = 8, timeout = 100;
1849        int idx = tx_ring->index;
1850
1851        /* disable EN bit on ring */
1852        enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1853
1854        /* wait for busy to clear */
1855        while (delay < timeout &&
1856               enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1857                msleep(delay);
1858                delay *= 2;
1859        }
1860
1861        if (delay >= timeout)
1862                netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1863                            idx);
1864}
1865
1866static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1867{
1868        int i;
1869
1870        for (i = 0; i < priv->num_tx_rings; i++)
1871                enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1872
1873        for (i = 0; i < priv->num_rx_rings; i++)
1874                enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1875
1876        udelay(1);
1877}
1878
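    /* Request one MSI-X interrupt per Rx/Tx vector, record the per-vector
     * interrupt enable/coalescing register pointers, map the SI MSI-X table
     * entries to the rings and give each vector a CPU affinity hint.
     */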
1879static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1880{
1881        struct pci_dev *pdev = priv->si->pdev;
1882        cpumask_t cpu_mask;
1883        int i, j, err;
1884
1885        for (i = 0; i < priv->bdr_int_num; i++) {
1886                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1887                struct enetc_int_vector *v = priv->int_vector[i];
1888                int entry = ENETC_BDR_INT_BASE_IDX + i;
1889                struct enetc_hw *hw = &priv->si->hw;
1890
1891                snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1892                         priv->ndev->name, i);
1893                err = request_irq(irq, enetc_msix, 0, v->name, v);
1894                if (err) {
1895                        dev_err(priv->dev, "request_irq() failed!\n");
1896                        goto irq_err;
1897                }
1898                disable_irq(irq);
1899
1900                v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1901                v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1902                v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1903
1904                enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1905
1906                for (j = 0; j < v->count_tx_rings; j++) {
1907                        int idx = v->tx_ring[j].index;
1908
1909                        enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1910                }
1911                cpumask_clear(&cpu_mask);
1912                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1913                irq_set_affinity_hint(irq, &cpu_mask);
1914        }
1915
1916        return 0;
1917
1918irq_err:
1919        while (i--) {
1920                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1921
1922                irq_set_affinity_hint(irq, NULL);
1923                free_irq(irq, priv->int_vector[i]);
1924        }
1925
1926        return err;
1927}
1928
1929static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1930{
1931        struct pci_dev *pdev = priv->si->pdev;
1932        int i;
1933
1934        for (i = 0; i < priv->bdr_int_num; i++) {
1935                int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1936
1937                irq_set_affinity_hint(irq, NULL);
1938                free_irq(irq, priv->int_vector[i]);
1939        }
1940}
1941
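    /* Program the interrupt coalescing packet thresholds and timers for all
     * rings according to the configured ic_mode, then enable the Rx and Tx
     * BD ring interrupts.
     */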
1942static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1943{
1944        struct enetc_hw *hw = &priv->si->hw;
1945        u32 icpt, ictt;
1946        int i;
1947
1948        /* enable Tx & Rx event indication */
1949        if (priv->ic_mode &
1950            (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1951                icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1952                /* init to non-0 minimum, will be adjusted later */
1953                ictt = 0x1;
1954        } else {
1955                icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1956                ictt = 0;
1957        }
1958
1959        for (i = 0; i < priv->num_rx_rings; i++) {
1960                enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1961                enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1962                enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1963        }
1964
1965        if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1966                icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1967        else
1968                icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1969
1970        for (i = 0; i < priv->num_tx_rings; i++) {
1971                enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1972                enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1973                enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1974        }
1975}
1976
1977static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1978{
1979        int i;
1980
1981        for (i = 0; i < priv->num_tx_rings; i++)
1982                enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1983
1984        for (i = 0; i < priv->num_rx_rings; i++)
1985                enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1986}
1987
1988static int enetc_phylink_connect(struct net_device *ndev)
1989{
1990        struct enetc_ndev_priv *priv = netdev_priv(ndev);
1991        struct ethtool_eee edata;
1992        int err;
1993
1994        if (!priv->phylink)
1995                return 0; /* phy-less mode */
1996
1997        err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1998        if (err) {
1999                dev_err(&ndev->dev, "could not attach to PHY\n");
2000                return err;
2001        }
2002
2003        /* disable EEE autoneg, until ENETC driver supports it */
2004        memset(&edata, 0, sizeof(struct ethtool_eee));
2005        phylink_ethtool_set_eee(priv->phylink, &edata);
2006
2007        return 0;
2008}
2009
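    /* Deferred work used for one-step PTP timestamping: release the
     * in-progress flag and, under the netif Tx lock, transmit the next
     * timestamping skb queued on tx_skbs, if any.
     */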
2010static void enetc_tx_onestep_tstamp(struct work_struct *work)
2011{
2012        struct enetc_ndev_priv *priv;
2013        struct sk_buff *skb;
2014
2015        priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2016
2017        netif_tx_lock(priv->ndev);
2018
2019        clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2020        skb = skb_dequeue(&priv->tx_skbs);
2021        if (skb)
2022                enetc_start_xmit(skb, priv->ndev);
2023
2024        netif_tx_unlock(priv->ndev);
2025}
2026
2027static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2028{
2029        INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2030        skb_queue_head_init(&priv->tx_skbs);
2031}
2032
2033void enetc_start(struct net_device *ndev)
2034{
2035        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2036        int i;
2037
2038        enetc_setup_interrupts(priv);
2039
2040        for (i = 0; i < priv->bdr_int_num; i++) {
2041                int irq = pci_irq_vector(priv->si->pdev,
2042                                         ENETC_BDR_INT_BASE_IDX + i);
2043
2044                napi_enable(&priv->int_vector[i]->napi);
2045                enable_irq(irq);
2046        }
2047
2048        if (priv->phylink)
2049                phylink_start(priv->phylink);
2050        else
2051                netif_carrier_on(ndev);
2052
2053        netif_tx_start_all_queues(ndev);
2054}
2055
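    /* ndo_open: request the interrupts, connect the PHY, allocate the Tx/Rx
     * BD ring resources, size the stack-visible queues and start the device.
     */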
2056int enetc_open(struct net_device *ndev)
2057{
2058        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2059        int num_stack_tx_queues;
2060        int err;
2061
2062        err = enetc_setup_irqs(priv);
2063        if (err)
2064                return err;
2065
2066        err = enetc_phylink_connect(ndev);
2067        if (err)
2068                goto err_phy_connect;
2069
2070        err = enetc_alloc_tx_resources(priv);
2071        if (err)
2072                goto err_alloc_tx;
2073
2074        err = enetc_alloc_rx_resources(priv);
2075        if (err)
2076                goto err_alloc_rx;
2077
2078        num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2079
2080        err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2081        if (err)
2082                goto err_set_queues;
2083
2084        err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
2085        if (err)
2086                goto err_set_queues;
2087
2088        enetc_tx_onestep_tstamp_init(priv);
2089        enetc_setup_bdrs(priv);
2090        enetc_start(ndev);
2091
2092        return 0;
2093
2094err_set_queues:
2095        enetc_free_rx_resources(priv);
2096err_alloc_rx:
2097        enetc_free_tx_resources(priv);
2098err_alloc_tx:
2099        if (priv->phylink)
2100                phylink_disconnect_phy(priv->phylink);
2101err_phy_connect:
2102        enetc_free_irqs(priv);
2103
2104        return err;
2105}
2106
2107void enetc_stop(struct net_device *ndev)
2108{
2109        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2110        int i;
2111
2112        netif_tx_stop_all_queues(ndev);
2113
2114        for (i = 0; i < priv->bdr_int_num; i++) {
2115                int irq = pci_irq_vector(priv->si->pdev,
2116                                         ENETC_BDR_INT_BASE_IDX + i);
2117
2118                disable_irq(irq);
2119                napi_synchronize(&priv->int_vector[i]->napi);
2120                napi_disable(&priv->int_vector[i]->napi);
2121        }
2122
2123        if (priv->phylink)
2124                phylink_stop(priv->phylink);
2125        else
2126                netif_carrier_off(ndev);
2127
2128        enetc_clear_interrupts(priv);
2129}
2130
2131int enetc_close(struct net_device *ndev)
2132{
2133        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2134
2135        enetc_stop(ndev);
2136        enetc_clear_bdrs(priv);
2137
2138        if (priv->phylink)
2139                phylink_disconnect_phy(priv->phylink);
2140        enetc_free_rxtx_rings(priv);
2141        enetc_free_rx_resources(priv);
2142        enetc_free_tx_resources(priv);
2143        enetc_free_irqs(priv);
2144
2145        return 0;
2146}
2147
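    /* mqprio offload: dedicate one Tx BD ring per traffic class with rising
     * ring priorities, or undo the mapping when num_tc is zero.
     */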
2148static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
2149{
2150        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2151        struct tc_mqprio_qopt *mqprio = type_data;
2152        struct enetc_bdr *tx_ring;
2153        int num_stack_tx_queues;
2154        u8 num_tc;
2155        int i;
2156
2157        num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
2158        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2159        num_tc = mqprio->num_tc;
2160
2161        if (!num_tc) {
2162                netdev_reset_tc(ndev);
2163                netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
2164
2165                /* Reset all ring priorities to 0 */
2166                for (i = 0; i < priv->num_tx_rings; i++) {
2167                        tx_ring = priv->tx_ring[i];
2168                        enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
2169                }
2170
2171                return 0;
2172        }
2173
2174        /* Check if we have enough BD rings available to accommodate all TCs */
2175        if (num_tc > num_stack_tx_queues) {
2176                netdev_err(ndev, "Max %d traffic classes supported\n",
2177                           num_stack_tx_queues);
2178                return -EINVAL;
2179        }
2180
2181        /* For the moment, we use only one BD ring per TC.
2182         *
2183         * Configure num_tc BD rings with increasing priorities.
2184         */
2185        for (i = 0; i < num_tc; i++) {
2186                tx_ring = priv->tx_ring[i];
2187                enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
2188        }
2189
2190        /* Reset the number of netdev queues based on the TC count */
2191        netif_set_real_num_tx_queues(ndev, num_tc);
2192
2193        netdev_set_num_tc(ndev, num_tc);
2194
2195        /* Each TC is associated with one netdev queue */
2196        for (i = 0; i < num_tc; i++)
2197                netdev_set_tc_queue(ndev, i, 1, i);
2198
2199        return 0;
2200}
2201
2202int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2203                   void *type_data)
2204{
2205        switch (type) {
2206        case TC_SETUP_QDISC_MQPRIO:
2207                return enetc_setup_tc_mqprio(ndev, type_data);
2208        case TC_SETUP_QDISC_TAPRIO:
2209                return enetc_setup_tc_taprio(ndev, type_data);
2210        case TC_SETUP_QDISC_CBS:
2211                return enetc_setup_tc_cbs(ndev, type_data);
2212        case TC_SETUP_QDISC_ETF:
2213                return enetc_setup_tc_txtime(ndev, type_data);
2214        case TC_SETUP_BLOCK:
2215                return enetc_setup_tc_psfp(ndev, type_data);
2216        default:
2217                return -EOPNOTSUPP;
2218        }
2219}
2220
2221static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
2222                                struct netlink_ext_ack *extack)
2223{
2224        struct enetc_ndev_priv *priv = netdev_priv(dev);
2225        struct bpf_prog *old_prog;
2226        bool is_up;
2227        int i;
2228
2229        /* The buffer layout is changing, so we need to drain the old
2230         * RX buffers and seed new ones.
2231         */
2232        is_up = netif_running(dev);
2233        if (is_up)
2234                dev_close(dev);
2235
2236        old_prog = xchg(&priv->xdp_prog, prog);
2237        if (old_prog)
2238                bpf_prog_put(old_prog);
2239
2240        for (i = 0; i < priv->num_rx_rings; i++) {
2241                struct enetc_bdr *rx_ring = priv->rx_ring[i];
2242
2243                rx_ring->xdp.prog = prog;
2244
2245                if (prog)
2246                        rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
2247                else
2248                        rx_ring->buffer_offset = ENETC_RXB_PAD;
2249        }
2250
2251        if (is_up)
2252                return dev_open(dev, extack);
2253
2254        return 0;
2255}
2256
2257int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
2258{
2259        switch (xdp->command) {
2260        case XDP_SETUP_PROG:
2261                return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
2262        default:
2263                return -EINVAL;
2264        }
2267}
2268
2269struct net_device_stats *enetc_get_stats(struct net_device *ndev)
2270{
2271        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2272        struct net_device_stats *stats = &ndev->stats;
2273        unsigned long packets = 0, bytes = 0;
2274        int i;
2275
2276        for (i = 0; i < priv->num_rx_rings; i++) {
2277                packets += priv->rx_ring[i]->stats.packets;
2278                bytes   += priv->rx_ring[i]->stats.bytes;
2279        }
2280
2281        stats->rx_packets = packets;
2282        stats->rx_bytes = bytes;
2283        bytes = 0;
2284        packets = 0;
2285
2286        for (i = 0; i < priv->num_tx_rings; i++) {
2287                packets += priv->tx_ring[i]->stats.packets;
2288                bytes   += priv->tx_ring[i]->stats.bytes;
2289        }
2290
2291        stats->tx_packets = packets;
2292        stats->tx_bytes = bytes;
2293
2294        return stats;
2295}
2296
2297static int enetc_set_rss(struct net_device *ndev, int en)
2298{
2299        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2300        struct enetc_hw *hw = &priv->si->hw;
2301        u32 reg;
2302
2303        enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
2304
2305        reg = enetc_rd(hw, ENETC_SIMR);
2306        reg &= ~ENETC_SIMR_RSSE;
2307        reg |= (en) ? ENETC_SIMR_RSSE : 0;
2308        enetc_wr(hw, ENETC_SIMR, reg);
2309
2310        return 0;
2311}
2312
2313static int enetc_set_psfp(struct net_device *ndev, int en)
2314{
2315        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2316        int err;
2317
2318        if (en) {
2319                err = enetc_psfp_enable(priv);
2320                if (err)
2321                        return err;
2322
2323                priv->active_offloads |= ENETC_F_QCI;
2324                return 0;
2325        }
2326
2327        err = enetc_psfp_disable(priv);
2328        if (err)
2329                return err;
2330
2331        priv->active_offloads &= ~ENETC_F_QCI;
2332
2333        return 0;
2334}
2335
2336static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
2337{
2338        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2339        int i;
2340
2341        for (i = 0; i < priv->num_rx_rings; i++)
2342                enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
2343}
2344
2345static void enetc_enable_txvlan(struct net_device *ndev, bool en)
2346{
2347        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2348        int i;
2349
2350        for (i = 0; i < priv->num_tx_rings; i++)
2351                enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
2352}
2353
2354int enetc_set_features(struct net_device *ndev,
2355                       netdev_features_t features)
2356{
2357        netdev_features_t changed = ndev->features ^ features;
2358        int err = 0;
2359
2360        if (changed & NETIF_F_RXHASH)
2361                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
2362
2363        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2364                enetc_enable_rxvlan(ndev,
2365                                    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
2366
2367        if (changed & NETIF_F_HW_VLAN_CTAG_TX)
2368                enetc_enable_txvlan(ndev,
2369                                    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
2370
2371        if (changed & NETIF_F_HW_TC)
2372                err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
2373
2374        return err;
2375}
2376
2377#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2378static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
2379{
2380        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2381        struct hwtstamp_config config;
2382        int ao;
2383
2384        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2385                return -EFAULT;
2386
2387        switch (config.tx_type) {
2388        case HWTSTAMP_TX_OFF:
2389                priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2390                break;
2391        case HWTSTAMP_TX_ON:
2392                priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2393                priv->active_offloads |= ENETC_F_TX_TSTAMP;
2394                break;
2395        case HWTSTAMP_TX_ONESTEP_SYNC:
2396                priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
2397                priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
2398                break;
2399        default:
2400                return -ERANGE;
2401        }
2402
2403        ao = priv->active_offloads;
2404        switch (config.rx_filter) {
2405        case HWTSTAMP_FILTER_NONE:
2406                priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
2407                break;
2408        default:
2409                priv->active_offloads |= ENETC_F_RX_TSTAMP;
2410                config.rx_filter = HWTSTAMP_FILTER_ALL;
2411        }
2412
2413        if (netif_running(ndev) && ao != priv->active_offloads) {
2414                enetc_close(ndev);
2415                enetc_open(ndev);
2416        }
2417
2418        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2419               -EFAULT : 0;
2420}
2421
2422static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
2423{
2424        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2425        struct hwtstamp_config config;
2426
2427        config.flags = 0;
2428
2429        if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
2430                config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
2431        else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
2432                config.tx_type = HWTSTAMP_TX_ON;
2433        else
2434                config.tx_type = HWTSTAMP_TX_OFF;
2435
2436        config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
2437                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
2438
2439        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2440               -EFAULT : 0;
2441}
2442#endif
2443
2444int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2445{
2446        struct enetc_ndev_priv *priv = netdev_priv(ndev);
2447#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
2448        if (cmd == SIOCSHWTSTAMP)
2449                return enetc_hwtstamp_set(ndev, rq);
2450        if (cmd == SIOCGHWTSTAMP)
2451                return enetc_hwtstamp_get(ndev, rq);
2452#endif
2453
2454        if (!priv->phylink)
2455                return -EOPNOTSUPP;
2456
2457        return phylink_mii_ioctl(priv->phylink, rq, cmd);
2458}
2459
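    /* Allocate the MSI-X vectors and the per-vector structures, spread the
     * Tx rings across the vectors, register the XDP Rx queue info and set
     * up NAPI. priv->xdp_tx_ring points at the last num_possible_cpus() Tx
     * rings, which serve XDP_TX when a program is attached.
     */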
2460int enetc_alloc_msix(struct enetc_ndev_priv *priv)
2461{
2462        struct pci_dev *pdev = priv->si->pdev;
2463        int first_xdp_tx_ring;
2464        int i, n, err, nvec;
2465        int v_tx_rings;
2466
2467        nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
2468        /* allocate MSIX for both messaging and Rx/Tx interrupts */
2469        n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
2470
2471        if (n < 0)
2472                return n;
2473
2474        if (n != nvec)
2475                return -EPERM;
2476
2477        /* # of tx rings per int vector */
2478        v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
2479
2480        for (i = 0; i < priv->bdr_int_num; i++) {
2481                struct enetc_int_vector *v;
2482                struct enetc_bdr *bdr;
2483                int j;
2484
2485                v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
2486                if (!v) {
2487                        err = -ENOMEM;
2488                        goto fail;
2489                }
2490
2491                priv->int_vector[i] = v;
2492
2493                bdr = &v->rx_ring;
2494                bdr->index = i;
2495                bdr->ndev = priv->ndev;
2496                bdr->dev = priv->dev;
2497                bdr->bd_count = priv->rx_bd_count;
2498                bdr->buffer_offset = ENETC_RXB_PAD;
2499                priv->rx_ring[i] = bdr;
2500
2501                err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
2502                if (err) {
2503                        kfree(v);
2504                        goto fail;
2505                }
2506
2507                err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
2508                                                 MEM_TYPE_PAGE_SHARED, NULL);
2509                if (err) {
2510                        xdp_rxq_info_unreg(&bdr->xdp.rxq);
2511                        kfree(v);
2512                        goto fail;
2513                }
2514
2515                /* init defaults for adaptive IC */
2516                if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
2517                        v->rx_ictt = 0x1;
2518                        v->rx_dim_en = true;
2519                }
2520                INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
2521                netif_napi_add(priv->ndev, &v->napi, enetc_poll,
2522                               NAPI_POLL_WEIGHT);
2523                v->count_tx_rings = v_tx_rings;
2524
2525                for (j = 0; j < v_tx_rings; j++) {
2526                        int idx;
2527
2528                        /* default tx ring mapping policy */
2529                        idx = priv->bdr_int_num * j + i;
2530                        __set_bit(idx, &v->tx_rings_map);
2531                        bdr = &v->tx_ring[j];
2532                        bdr->index = idx;
2533                        bdr->ndev = priv->ndev;
2534                        bdr->dev = priv->dev;
2535                        bdr->bd_count = priv->tx_bd_count;
2536                        priv->tx_ring[idx] = bdr;
2537                }
2538        }
2539
2540        first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
2541        priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
2542
2543        return 0;
2544
2545fail:
2546        while (i--) {
2547                struct enetc_int_vector *v = priv->int_vector[i];
2548                struct enetc_bdr *rx_ring = &v->rx_ring;
2549
2550                xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2551                xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2552                netif_napi_del(&v->napi);
2553                cancel_work_sync(&v->rx_dim.work);
2554                kfree(v);
2555        }
2556
2557        pci_free_irq_vectors(pdev);
2558
2559        return err;
2560}
2561
2562void enetc_free_msix(struct enetc_ndev_priv *priv)
2563{
2564        int i;
2565
2566        for (i = 0; i < priv->bdr_int_num; i++) {
2567                struct enetc_int_vector *v = priv->int_vector[i];
2568                struct enetc_bdr *rx_ring = &v->rx_ring;
2569
2570                xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
2571                xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
2572                netif_napi_del(&v->napi);
2573                cancel_work_sync(&v->rx_dim.work);
2574        }
2575
2576        for (i = 0; i < priv->num_rx_rings; i++)
2577                priv->rx_ring[i] = NULL;
2578
2579        for (i = 0; i < priv->num_tx_rings; i++)
2580                priv->tx_ring[i] = NULL;
2581
2582        for (i = 0; i < priv->bdr_int_num; i++) {
2583                kfree(priv->int_vector[i]);
2584                priv->int_vector[i] = NULL;
2585        }
2586
2587        /* disable all MSIX for this device */
2588        pci_free_irq_vectors(priv->si->pdev);
2589}
2590
2591static void enetc_kfree_si(struct enetc_si *si)
2592{
2593        char *p = (char *)si - si->pad;
2594
2595        kfree(p);
2596}
2597
2598static void enetc_detect_errata(struct enetc_si *si)
2599{
2600        if (si->pdev->revision == ENETC_REV1)
2601                si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
2602}
2603
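    /* Common PCI probe helper: issue an FLR, enable the device, set the DMA
     * mask, map the register BAR (ENETC_BAR_REGS) and allocate the enetc_si
     * structure with the caller's private area appended, forced to
     * ENETC_SI_ALIGN (32 byte) alignment.
     */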
2604int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
2605{
2606        struct enetc_si *si, *p;
2607        struct enetc_hw *hw;
2608        size_t alloc_size;
2609        int err, len;
2610
2611        pcie_flr(pdev);
2612        err = pci_enable_device_mem(pdev);
2613        if (err) {
2614                dev_err(&pdev->dev, "device enable failed\n");
2615                return err;
2616        }
2617
2618        /* set up for high or low dma */
2619        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2620        if (err) {
2621                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2622                if (err) {
2623                        dev_err(&pdev->dev,
2624                                "DMA configuration failed: 0x%x\n", err);
2625                        goto err_dma;
2626                }
2627        }
2628
2629        err = pci_request_mem_regions(pdev, name);
2630        if (err) {
2631                dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
2632                goto err_pci_mem_reg;
2633        }
2634
2635        pci_set_master(pdev);
2636
2637        alloc_size = sizeof(struct enetc_si);
2638        if (sizeof_priv) {
2639                /* align priv to 32B */
2640                alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
2641                alloc_size += sizeof_priv;
2642        }
2643        /* force 32B alignment for enetc_si */
2644        alloc_size += ENETC_SI_ALIGN - 1;
2645
2646        p = kzalloc(alloc_size, GFP_KERNEL);
2647        if (!p) {
2648                err = -ENOMEM;
2649                goto err_alloc_si;
2650        }
2651
2652        si = PTR_ALIGN(p, ENETC_SI_ALIGN);
2653        si->pad = (char *)si - (char *)p;
2654
2655        pci_set_drvdata(pdev, si);
2656        si->pdev = pdev;
2657        hw = &si->hw;
2658
2659        len = pci_resource_len(pdev, ENETC_BAR_REGS);
2660        hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
2661        if (!hw->reg) {
2662                err = -ENXIO;
2663                dev_err(&pdev->dev, "ioremap() failed\n");
2664                goto err_ioremap;
2665        }
2666        if (len > ENETC_PORT_BASE)
2667                hw->port = hw->reg + ENETC_PORT_BASE;
2668        if (len > ENETC_GLOBAL_BASE)
2669                hw->global = hw->reg + ENETC_GLOBAL_BASE;
2670
2671        enetc_detect_errata(si);
2672
2673        return 0;
2674
2675err_ioremap:
2676        enetc_kfree_si(si);
2677err_alloc_si:
2678        pci_release_mem_regions(pdev);
2679err_pci_mem_reg:
2680err_dma:
2681        pci_disable_device(pdev);
2682
2683        return err;
2684}
2685
2686void enetc_pci_remove(struct pci_dev *pdev)
2687{
2688        struct enetc_si *si = pci_get_drvdata(pdev);
2689        struct enetc_hw *hw = &si->hw;
2690
2691        iounmap(hw->reg);
2692        enetc_kfree_si(si);
2693        pci_release_mem_regions(pdev);
2694        pci_disable_device(pdev);
2695}
2696