linux/drivers/net/ethernet/ec_bhf.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA,
 * which can be found on Beckhoff CX50xx industrial PCs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>

#define TIMER_INTERVAL_NSEC     20000

#define INFO_BLOCK_SIZE         0x10
#define INFO_BLOCK_TYPE         0x0
#define INFO_BLOCK_REV          0x2
#define INFO_BLOCK_BLK_CNT      0x4
#define INFO_BLOCK_TX_CHAN      0x4
#define INFO_BLOCK_RX_CHAN      0x5
#define INFO_BLOCK_OFFSET       0x8

#define EC_MII_OFFSET           0x4
#define EC_FIFO_OFFSET          0x8
#define EC_MAC_OFFSET           0xc

#define MAC_FRAME_ERR_CNT       0x0
#define MAC_RX_ERR_CNT          0x1
#define MAC_CRC_ERR_CNT         0x2
#define MAC_LNK_LST_ERR_CNT     0x3
#define MAC_TX_FRAME_CNT        0x10
#define MAC_RX_FRAME_CNT        0x14
#define MAC_TX_FIFO_LVL         0x20
#define MAC_DROPPED_FRMS        0x28
#define MAC_CONNECTED_CCAT_FLAG 0x78

#define MII_MAC_ADDR            0x8
#define MII_MAC_FILT_FLAG       0xe
#define MII_LINK_STATUS         0xf

#define FIFO_TX_REG             0x0
#define FIFO_TX_RESET           0x8
#define FIFO_RX_REG             0x10
#define FIFO_RX_ADDR_VALID      (1u << 31)
#define FIFO_RX_RESET           0x18

#define DMA_CHAN_OFFSET         0x1000
#define DMA_CHAN_SIZE           0x8

#define DMA_WINDOW_SIZE_MASK    0xfffffffc

#define ETHERCAT_MASTER_ID      0x14

static const struct pci_device_id ids[] = {
        { PCI_DEVICE(0x15ec, 0x5000), },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

struct rx_header {
#define RXHDR_NEXT_ADDR_MASK    0xffffffu
#define RXHDR_NEXT_VALID        (1u << 31)
        __le32 next;
#define RXHDR_NEXT_RECV_FLAG    0x1
        __le32 recv;
#define RXHDR_LEN_MASK          0xfffu
        __le16 len;
        __le16 port;
        __le32 reserved;
        u8 timestamp[8];
} __packed;

#define PKT_PAYLOAD_SIZE        0x7e8
struct rx_desc {
        struct rx_header header;
        u8 data[PKT_PAYLOAD_SIZE];
} __packed;

struct tx_header {
        __le16 len;
#define TX_HDR_PORT_0           0x1
#define TX_HDR_PORT_1           0x2
        u8 port;
        u8 ts_enable;
#define TX_HDR_SENT             0x1
        __le32 sent;
        u8 timestamp[8];
} __packed;

struct tx_desc {
        struct tx_header header;
        u8 data[PKT_PAYLOAD_SIZE];
} __packed;

#define FIFO_SIZE               64

static long polling_frequency = TIMER_INTERVAL_NSEC;

struct bhf_dma {
        u8 *buf;
        size_t len;
        dma_addr_t buf_phys;

        u8 *alloc;
        size_t alloc_len;
        dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
        struct net_device *net_dev;
        struct pci_dev *dev;

        void __iomem *io;
        void __iomem *dma_io;

        struct hrtimer hrtimer;

        int tx_dma_chan;
        int rx_dma_chan;
        void __iomem *ec_io;
        void __iomem *fifo_io;
        void __iomem *mii_io;
        void __iomem *mac_io;

        struct bhf_dma rx_buf;
        struct rx_desc *rx_descs;
        int rx_dnext;
        int rx_dcount;

        struct bhf_dma tx_buf;
        struct tx_desc *tx_descs;
        int tx_dcount;
        int tx_dnext;

        u64 stat_rx_bytes;
        u64 stat_tx_bytes;
};

#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

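/* Clear the MAC error and frame counters, reset the TX and RX FIFOs and
 * zero the TX FIFO level.
 */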
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
        iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
        iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
        iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
        iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

        iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
        iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

        iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}

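/* Hand a TX descriptor to the hardware: the write to FIFO_TX_REG packs the
 * frame length rounded up to a multiple of 8 into the high bits and the
 * descriptor's offset within the TX DMA buffer into the low bits.
 */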
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
        u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
        u32 addr = (u8 *)desc - priv->tx_buf.buf;

        iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
}

static int ec_bhf_desc_sent(struct tx_desc *desc)
{
        return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}

static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
        if (unlikely(netif_queue_stopped(priv->net_dev))) {
                /* Make sure that we perceive changes to tx_dnext. */
                smp_rmb();

                if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
                        netif_wake_queue(priv->net_dev);
        }
}

static int ec_bhf_pkt_received(struct rx_desc *desc)
{
        return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}

static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
        iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
                  priv->fifo_io + FIFO_RX_REG);
}

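/* Walk the RX ring starting at rx_dnext, pass every completed descriptor
 * up the stack as a new skb, then clear its receive flag and return it to
 * the hardware RX FIFO.
 */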
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
        struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

        while (ec_bhf_pkt_received(desc)) {
                int pkt_size = (le16_to_cpu(desc->header.len) &
                               RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
                u8 *data = desc->data;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
                if (skb) {
                        skb_put_data(skb, data, pkt_size);
                        skb->protocol = eth_type_trans(skb, priv->net_dev);
                        priv->stat_rx_bytes += pkt_size;

                        netif_rx(skb);
                } else {
                        dev_err_ratelimited(PRIV_TO_DEV(priv),
                                            "Couldn't allocate an sk_buff for a packet of size %u\n",
                                            pkt_size);
                }

                desc->header.recv = 0;

                ec_bhf_add_rx_desc(priv, desc);

                priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
                desc = &priv->rx_descs[priv->rx_dnext];
        }
}

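/* The device is serviced by a polling hrtimer rather than an interrupt
 * handler: every tick drains the RX ring and, if the queue was stopped,
 * checks whether the next TX descriptor has completed.
 */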
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
        struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
                                                hrtimer);
        ec_bhf_process_rx(priv);
        ec_bhf_process_tx(priv);

        if (!netif_running(priv->net_dev))
                return HRTIMER_NORESTART;

        hrtimer_forward_now(timer, polling_frequency);
        return HRTIMER_RESTART;
}

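/* Scan the info blocks in BAR 0 for the EtherCAT master block and derive
 * the DMA channel numbers and the MII, FIFO and MAC register offsets from
 * it.
 */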
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
        struct device *dev = PRIV_TO_DEV(priv);
        unsigned int block_count, i;
        void __iomem *ec_info;

        block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
        for (i = 0; i < block_count; i++) {
                u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
                                    INFO_BLOCK_TYPE);
                if (type == ETHERCAT_MASTER_ID)
                        break;
        }
        if (i == block_count) {
                dev_err(dev, "EtherCAT master with DMA block not found\n");
                return -ENODEV;
        }

        ec_info = priv->io + i * INFO_BLOCK_SIZE;

        priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
        priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

        priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
        priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
        priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
        priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

        return 0;
}

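/* Transmit path: the frame is copied (with checksum, if needed) straight
 * into the next TX descriptor in the DMA buffer and handed to the FIFO.
 * If the following descriptor has not been sent yet, the ring is full, so
 * stop the queue until the polling timer sees that descriptor complete.
 */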
static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
                                     struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct tx_desc *desc;
        unsigned int len;

        desc = &priv->tx_descs[priv->tx_dnext];

        skb_copy_and_csum_dev(skb, desc->data);
        len = skb->len;

        memset(&desc->header, 0, sizeof(desc->header));
        desc->header.len = cpu_to_le16(len);
        desc->header.port = TX_HDR_PORT_0;

        ec_bhf_send_packet(priv, desc);

        priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

        if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
                /* Make sure that updates to tx_dnext are perceived
                 * by timer routine.
                 */
                smp_wmb();

                netif_stop_queue(net_dev);
        }

        priv->stat_tx_bytes += len;

        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}

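/* The DMA controller exposes one window register per channel in BAR 2.
 * Writing all ones to it and reading the value back yields a mask that
 * describes the size and alignment of that channel's DMA window.  We
 * allocate twice the window size so that an aligned region of the wanted
 * size is guaranteed to fit inside the allocation.
 */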
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
                                struct bhf_dma *buf,
                                int channel,
                                int size)
{
        int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
        struct device *dev = PRIV_TO_DEV(priv);
        u32 mask;

        iowrite32(0xffffffff, priv->dma_io + offset);

        mask = ioread32(priv->dma_io + offset);
        mask &= DMA_WINDOW_SIZE_MASK;

        /* We want a chunk of memory that is:
         * - aligned to the mask we just read
         * - at most as large as the window the mask describes
         * To guarantee that such an aligned chunk exists, allocate a
         * buffer of twice that size.
         */
        buf->len = min_t(int, ~mask + 1, size);
        buf->alloc_len = 2 * buf->len;

        buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
                                        GFP_KERNEL);
        if (buf->alloc == NULL) {
                dev_err(dev, "Failed to allocate buffer\n");
                return -ENOMEM;
        }

        buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
        buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

        iowrite32(0, priv->dma_io + offset + 4);
        iowrite32(buf->buf_phys, priv->dma_io + offset);

        return 0;
}

static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
        int i = 0;

        priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
        priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
        priv->tx_dnext = 0;

        for (i = 0; i < priv->tx_dcount; i++)
                priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}

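/* Chain the RX descriptors into a ring via their 'next' offsets (the last
 * one points back to offset 0) and hand every descriptor to the hardware
 * RX FIFO.
 */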
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
        int i;

        priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
        priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
        priv->rx_dnext = 0;

        for (i = 0; i < priv->rx_dcount; i++) {
                struct rx_desc *desc = &priv->rx_descs[i];
                u32 next;

                if (i != priv->rx_dcount - 1)
                        next = (u8 *)(desc + 1) - priv->rx_buf.buf;
                else
                        next = 0;
                next |= RXHDR_NEXT_VALID;
                desc->header.next = cpu_to_le32(next);
                desc->header.recv = 0;
                ec_bhf_add_rx_desc(priv, desc);
        }
}

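/* Bring the interface up: reset the device, allocate and initialize the
 * RX and TX DMA rings, clear MII_MAC_FILT_FLAG and start the polling
 * timer that services both rings.
 */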
static int ec_bhf_open(struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct device *dev = PRIV_TO_DEV(priv);
        int err = 0;

        ec_bhf_reset(priv);

        err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
                                   FIFO_SIZE * sizeof(struct rx_desc));
        if (err) {
                dev_err(dev, "Failed to allocate rx buffer\n");
                goto out;
        }
        ec_bhf_setup_rx_descs(priv);

        err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
                                   FIFO_SIZE * sizeof(struct tx_desc));
        if (err) {
                dev_err(dev, "Failed to allocate tx buffer\n");
                goto error_rx_free;
        }
        iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
        ec_bhf_setup_tx_descs(priv);

        netif_start_queue(net_dev);

        hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        priv->hrtimer.function = ec_bhf_timer_fun;
        hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);

        return 0;

error_rx_free:
        dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
                          priv->rx_buf.alloc_phys);
out:
        return err;
}

static int ec_bhf_stop(struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct device *dev = PRIV_TO_DEV(priv);

        hrtimer_cancel(&priv->hrtimer);

        ec_bhf_reset(priv);

        netif_tx_disable(net_dev);

        dma_free_coherent(dev, priv->tx_buf.alloc_len,
                          priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
        dma_free_coherent(dev, priv->rx_buf.alloc_len,
                          priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

        return 0;
}

static void
ec_bhf_get_stats(struct net_device *net_dev,
                 struct rtnl_link_stats64 *stats)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);

        stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
                                ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
                                ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
        stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
        stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
        stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

        stats->tx_bytes = priv->stat_tx_bytes;
        stats->rx_bytes = priv->stat_rx_bytes;
}

static const struct net_device_ops ec_bhf_netdev_ops = {
        .ndo_start_xmit         = ec_bhf_start_xmit,
        .ndo_open               = ec_bhf_open,
        .ndo_stop               = ec_bhf_stop,
        .ndo_get_stats64        = ec_bhf_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr
};

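/* Probe: BAR 0 carries the register space (info blocks and the EtherCAT
 * master block), BAR 2 the DMA channel configuration.  The MAC address is
 * read from the MII register block.
 */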
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        struct net_device *net_dev;
        struct ec_bhf_priv *priv;
        void __iomem *dma_io;
        void __iomem *io;
        int err = 0;

        err = pci_enable_device(dev);
        if (err)
                return err;

        pci_set_master(dev);

        err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&dev->dev,
                        "Required dma mask not supported, failed to initialize device\n");
                goto err_disable_dev;
        }

        err = pci_request_regions(dev, "ec_bhf");
        if (err) {
                dev_err(&dev->dev, "Failed to request pci memory regions\n");
                goto err_disable_dev;
        }

        io = pci_iomap(dev, 0, 0);
        if (!io) {
                dev_err(&dev->dev, "Failed to map pci card memory bar 0");
                err = -EIO;
                goto err_release_regions;
        }

        dma_io = pci_iomap(dev, 2, 0);
        if (!dma_io) {
                dev_err(&dev->dev, "Failed to map pci card memory bar 2");
                err = -EIO;
                goto err_unmap;
        }

        net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
        if (net_dev == NULL) {
                err = -ENOMEM;
                goto err_unmap_dma_io;
        }

        pci_set_drvdata(dev, net_dev);
        SET_NETDEV_DEV(net_dev, &dev->dev);

        net_dev->features = 0;
        net_dev->flags |= IFF_NOARP;

        net_dev->netdev_ops = &ec_bhf_netdev_ops;

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;
        priv->io = io;
        priv->dma_io = dma_io;
        priv->dev = dev;

        err = ec_bhf_setup_offsets(priv);
        if (err < 0)
                goto err_free_net_dev;

        memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);

        err = register_netdev(net_dev);
        if (err < 0)
                goto err_free_net_dev;

        return 0;

err_free_net_dev:
        free_netdev(net_dev);
err_unmap_dma_io:
        pci_iounmap(dev, dma_io);
err_unmap:
        pci_iounmap(dev, io);
err_release_regions:
        pci_release_regions(dev);
err_disable_dev:
        pci_clear_master(dev);
        pci_disable_device(dev);

        return err;
}

static void ec_bhf_remove(struct pci_dev *dev)
{
        struct net_device *net_dev = pci_get_drvdata(dev);
        struct ec_bhf_priv *priv = netdev_priv(net_dev);

        unregister_netdev(net_dev);

        pci_iounmap(dev, priv->dma_io);
        pci_iounmap(dev, priv->io);

        free_netdev(net_dev);

        pci_release_regions(dev);
        pci_clear_master(dev);
        pci_disable_device(dev);
}

static struct pci_driver pci_driver = {
        .name           = "ec_bhf",
        .id_table       = ids,
        .probe          = ec_bhf_probe,
        .remove         = ec_bhf_remove,
};
module_pci_driver(pci_driver);

module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");