linux/drivers/net/ethernet/ec_bhf.c
/*
 * drivers/net/ethernet/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 * These can be found on Beckhoff CX50xx industrial PCs.
 */
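
/* Note: the device is serviced purely by polling; no interrupt is requested.
 * A high-resolution timer (see ec_bhf_timer_fun() below) walks the RX and TX
 * descriptor rings every polling_frequency nanoseconds.
 */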

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>

#define TIMER_INTERVAL_NSEC     20000

#define INFO_BLOCK_SIZE         0x10
#define INFO_BLOCK_TYPE         0x0
#define INFO_BLOCK_REV          0x2
#define INFO_BLOCK_BLK_CNT      0x4
#define INFO_BLOCK_TX_CHAN      0x4
#define INFO_BLOCK_RX_CHAN      0x5
#define INFO_BLOCK_OFFSET       0x8

#define EC_MII_OFFSET           0x4
#define EC_FIFO_OFFSET          0x8
#define EC_MAC_OFFSET           0xc

#define MAC_FRAME_ERR_CNT       0x0
#define MAC_RX_ERR_CNT          0x1
#define MAC_CRC_ERR_CNT         0x2
#define MAC_LNK_LST_ERR_CNT     0x3
#define MAC_TX_FRAME_CNT        0x10
#define MAC_RX_FRAME_CNT        0x14
#define MAC_TX_FIFO_LVL         0x20
#define MAC_DROPPED_FRMS        0x28
#define MAC_CONNECTED_CCAT_FLAG 0x78

#define MII_MAC_ADDR            0x8
#define MII_MAC_FILT_FLAG       0xe
#define MII_LINK_STATUS         0xf

#define FIFO_TX_REG             0x0
#define FIFO_TX_RESET           0x8
#define FIFO_RX_REG             0x10
#define FIFO_RX_ADDR_VALID      (1u << 31)
#define FIFO_RX_RESET           0x18

#define DMA_CHAN_OFFSET         0x1000
#define DMA_CHAN_SIZE           0x8

#define DMA_WINDOW_SIZE_MASK    0xfffffffc

#define ETHERCAT_MASTER_ID      0x14

static const struct pci_device_id ids[] = {
        { PCI_DEVICE(0x15ec, 0x5000), },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

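/* Descriptor layouts shared with the hardware through the DMA buffers.
 * Multi-byte fields are little-endian as seen by the device.
 */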
struct rx_header {
#define RXHDR_NEXT_ADDR_MASK    0xffffffu
#define RXHDR_NEXT_VALID        (1u << 31)
        __le32 next;
#define RXHDR_NEXT_RECV_FLAG    0x1
        __le32 recv;
#define RXHDR_LEN_MASK          0xfffu
        __le16 len;
        __le16 port;
        __le32 reserved;
        u8 timestamp[8];
} __packed;

#define PKT_PAYLOAD_SIZE        0x7e8
struct rx_desc {
        struct rx_header header;
        u8 data[PKT_PAYLOAD_SIZE];
} __packed;

struct tx_header {
        __le16 len;
#define TX_HDR_PORT_0           0x1
#define TX_HDR_PORT_1           0x2
        u8 port;
        u8 ts_enable;
#define TX_HDR_SENT             0x1
        __le32 sent;
        u8 timestamp[8];
} __packed;

struct tx_desc {
        struct tx_header header;
        u8 data[PKT_PAYLOAD_SIZE];
} __packed;

#define FIFO_SIZE               64

static long polling_frequency = TIMER_INTERVAL_NSEC;

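/* One coherent DMA buffer as configured for a DMA channel: 'alloc' is the
 * raw allocation, 'buf' is the properly aligned window inside it that is
 * actually programmed into the hardware (see ec_bhf_alloc_dma_mem()).
 */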
struct bhf_dma {
        u8 *buf;
        size_t len;
        dma_addr_t buf_phys;

        u8 *alloc;
        size_t alloc_len;
        dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
        struct net_device *net_dev;
        struct pci_dev *dev;

        void __iomem *io;
        void __iomem *dma_io;

        struct hrtimer hrtimer;

        int tx_dma_chan;
        int rx_dma_chan;
        void __iomem *ec_io;
        void __iomem *fifo_io;
        void __iomem *mii_io;
        void __iomem *mac_io;

        struct bhf_dma rx_buf;
        struct rx_desc *rx_descs;
        int rx_dnext;
        int rx_dcount;

        struct bhf_dma tx_buf;
        struct tx_desc *tx_descs;
        int tx_dcount;
        int tx_dnext;

        u64 stat_rx_bytes;
        u64 stat_tx_bytes;
};

#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

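/* Clear the MAC statistics and error counters and reset both FIFOs. */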
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
        iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
        iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
        iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
        iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
        iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

        iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
        iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

        iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}

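/* Queue a filled descriptor for transmission by writing its byte offset
 * within the TX DMA buffer (together with the 8-byte-aligned frame length
 * in the upper bits) to the TX FIFO register.
 */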
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
        u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
        u32 addr = (u8 *)desc - priv->tx_buf.buf;

        iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
}

static int ec_bhf_desc_sent(struct tx_desc *desc)
{
        return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}

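/* Called from the polling timer: if the TX queue was stopped because the
 * next descriptor was still in flight, wake it up once the hardware has
 * marked that descriptor as sent.
 */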
static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
        if (unlikely(netif_queue_stopped(priv->net_dev))) {
                /* Make sure that we perceive changes to tx_dnext. */
                smp_rmb();

                if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
                        netif_wake_queue(priv->net_dev);
        }
}

static int ec_bhf_pkt_received(struct rx_desc *desc)
{
        return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}

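/* Give a descriptor (back) to the hardware by writing its offset within the
 * RX DMA buffer, tagged as valid, to the RX FIFO register.
 */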
static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
        iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
                  priv->fifo_io + FIFO_RX_REG);
}

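/* Walk the RX ring from the polling timer: copy each received frame into a
 * freshly allocated skb, pass it up the stack, then hand the descriptor back
 * to the hardware and advance to the next slot.
 */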
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
        struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

        while (ec_bhf_pkt_received(desc)) {
                int pkt_size = (le16_to_cpu(desc->header.len) &
                               RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
                u8 *data = desc->data;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
                if (skb) {
                        skb_put_data(skb, data, pkt_size);
                        skb->protocol = eth_type_trans(skb, priv->net_dev);
                        priv->stat_rx_bytes += pkt_size;

                        netif_rx(skb);
                } else {
                        dev_err_ratelimited(PRIV_TO_DEV(priv),
                                            "Couldn't allocate an sk_buff for a packet of size %u\n",
                                            pkt_size);
                }

                desc->header.recv = 0;

                ec_bhf_add_rx_desc(priv, desc);

                priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
                desc = &priv->rx_descs[priv->rx_dnext];
        }
}

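/* The driver is purely polling-driven: this hrtimer callback services both
 * rings and re-arms itself every polling_frequency nanoseconds for as long
 * as the interface is up.
 */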
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
        struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
                                                hrtimer);
        ec_bhf_process_rx(priv);
        ec_bhf_process_tx(priv);

        if (!netif_running(priv->net_dev))
                return HRTIMER_NORESTART;

        hrtimer_forward_now(timer, polling_frequency);
        return HRTIMER_RESTART;
}

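/* Scan the info blocks at the start of BAR 0 for the EtherCAT master block
 * and derive the DMA channel numbers and the MII/FIFO/MAC register offsets
 * from it.
 */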
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
        struct device *dev = PRIV_TO_DEV(priv);
        unsigned block_count, i;
        void __iomem *ec_info;

        block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
        for (i = 0; i < block_count; i++) {
                u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
                                    INFO_BLOCK_TYPE);
                if (type == ETHERCAT_MASTER_ID)
                        break;
        }
        if (i == block_count) {
                dev_err(dev, "EtherCAT master with DMA block not found\n");
                return -ENODEV;
        }

        ec_info = priv->io + i * INFO_BLOCK_SIZE;

        priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
        priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

        priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
        priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
        priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
        priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

        return 0;
}

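/* Transmit path: the frame is copied into the next free TX descriptor (no
 * per-skb DMA mapping is needed since the ring lives in coherent memory),
 * and the queue is stopped when the following descriptor has not been
 * reported as sent yet.
 */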
static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
                                     struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct tx_desc *desc;
        unsigned len;

        desc = &priv->tx_descs[priv->tx_dnext];

        skb_copy_and_csum_dev(skb, desc->data);
        len = skb->len;

        memset(&desc->header, 0, sizeof(desc->header));
        desc->header.len = cpu_to_le16(len);
        desc->header.port = TX_HDR_PORT_0;

        ec_bhf_send_packet(priv, desc);

        priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

        if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
                /* Make sure that updates to tx_dnext are perceived
                 * by the timer routine.
                 */
                smp_wmb();

                netif_stop_queue(net_dev);
        }

        priv->stat_tx_bytes += len;

        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}

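/* Set up one DMA channel. Writing all-ones to the channel's base register
 * and reading it back yields a mask that describes the size and alignment
 * of the DMA window the device supports; the buffer is then allocated at
 * twice the needed length so that a correctly aligned window can always be
 * carved out of it.
 */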
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
                                struct bhf_dma *buf,
                                int channel,
                                int size)
{
        int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
        struct device *dev = PRIV_TO_DEV(priv);
        u32 mask;

        iowrite32(0xffffffff, priv->dma_io + offset);

        mask = ioread32(priv->dma_io + offset);
        mask &= DMA_WINDOW_SIZE_MASK;

        /* We want to allocate a chunk of memory that is:
         * - aligned to the mask we just read
         * - no larger than the DMA window the mask describes
         * To guarantee both, allocate a buffer of twice that size and
         * use a suitably aligned window inside it.
         */
        buf->len = min_t(int, ~mask + 1, size);
        buf->alloc_len = 2 * buf->len;

        buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
                                        GFP_KERNEL);
        if (buf->alloc == NULL) {
                dev_err(dev, "Failed to allocate buffer\n");
                return -ENOMEM;
        }

        buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
        buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

        iowrite32(0, priv->dma_io + offset + 4);
        iowrite32(buf->buf_phys, priv->dma_io + offset);

        return 0;
}

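/* Carve the TX DMA buffer into descriptors and mark them all as already
 * sent, so that every slot is immediately available to ec_bhf_start_xmit().
 */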
static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
        int i = 0;

        priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
        priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
        priv->tx_dnext = 0;

        for (i = 0; i < priv->tx_dcount; i++)
                priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}

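/* Carve the RX DMA buffer into descriptors, chain them through their 'next'
 * offsets and hand each one to the RX FIFO.
 */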
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
        int i;

        priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
        priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
        priv->rx_dnext = 0;

        for (i = 0; i < priv->rx_dcount; i++) {
                struct rx_desc *desc = &priv->rx_descs[i];
                u32 next;

                if (i != priv->rx_dcount - 1)
                        next = (u8 *)(desc + 1) - priv->rx_buf.buf;
                else
                        next = 0;
                next |= RXHDR_NEXT_VALID;
                desc->header.next = cpu_to_le32(next);
                desc->header.recv = 0;
                ec_bhf_add_rx_desc(priv, desc);
        }
}

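/* ndo_open: reset the MAC, allocate and arm the RX and TX rings, clear the
 * MAC address filter flag, and start the polling timer.
 */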
static int ec_bhf_open(struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct device *dev = PRIV_TO_DEV(priv);
        int err = 0;

        ec_bhf_reset(priv);

        err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
                                   FIFO_SIZE * sizeof(struct rx_desc));
        if (err) {
                dev_err(dev, "Failed to allocate rx buffer\n");
                goto out;
        }
        ec_bhf_setup_rx_descs(priv);

        err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
                                   FIFO_SIZE * sizeof(struct tx_desc));
        if (err) {
                dev_err(dev, "Failed to allocate tx buffer\n");
                goto error_rx_free;
        }
        iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
        ec_bhf_setup_tx_descs(priv);

        netif_start_queue(net_dev);

        hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        priv->hrtimer.function = ec_bhf_timer_fun;
        hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);

        return 0;

error_rx_free:
        dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
                          priv->rx_buf.alloc_phys);
out:
        return err;
}

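/* ndo_stop: cancel the polling timer, reset the MAC, disable the TX queue
 * and release both DMA buffers.
 */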
static int ec_bhf_stop(struct net_device *net_dev)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
        struct device *dev = PRIV_TO_DEV(priv);

        hrtimer_cancel(&priv->hrtimer);

        ec_bhf_reset(priv);

        netif_tx_disable(net_dev);

        dma_free_coherent(dev, priv->tx_buf.alloc_len,
                          priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
        dma_free_coherent(dev, priv->rx_buf.alloc_len,
                          priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

        return 0;
}

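/* Packet and error counts are read back from the MAC registers; byte counts
 * are accumulated in software on the RX/TX paths.
 */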
static void
ec_bhf_get_stats(struct net_device *net_dev,
                 struct rtnl_link_stats64 *stats)
{
        struct ec_bhf_priv *priv = netdev_priv(net_dev);

        stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
                                ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
                                ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
        stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
        stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
        stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

        stats->tx_bytes = priv->stat_tx_bytes;
        stats->rx_bytes = priv->stat_rx_bytes;
}

static const struct net_device_ops ec_bhf_netdev_ops = {
        .ndo_start_xmit         = ec_bhf_start_xmit,
        .ndo_open               = ec_bhf_open,
        .ndo_stop               = ec_bhf_stop,
        .ndo_get_stats64        = ec_bhf_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr
};

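/* PCI probe: BAR 0 exposes the info blocks and the EtherCAT master
 * registers, BAR 2 the per-channel DMA window configuration. Only 32-bit
 * DMA addressing is used.
 */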
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        struct net_device *net_dev;
        struct ec_bhf_priv *priv;
        void __iomem *dma_io;
        void __iomem *io;
        int err = 0;

        err = pci_enable_device(dev);
        if (err)
                return err;

        pci_set_master(dev);

        err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&dev->dev,
                        "Required dma mask not supported, failed to initialize device\n");
                err = -EIO;
                goto err_disable_dev;
        }

        err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&dev->dev,
                        "Required dma mask not supported, failed to initialize device\n");
                goto err_disable_dev;
        }

        err = pci_request_regions(dev, "ec_bhf");
        if (err) {
                dev_err(&dev->dev, "Failed to request pci memory regions\n");
                goto err_disable_dev;
        }

        io = pci_iomap(dev, 0, 0);
        if (!io) {
                dev_err(&dev->dev, "Failed to map pci card memory bar 0\n");
                err = -EIO;
                goto err_release_regions;
        }

        dma_io = pci_iomap(dev, 2, 0);
        if (!dma_io) {
                dev_err(&dev->dev, "Failed to map pci card memory bar 2\n");
                err = -EIO;
                goto err_unmap;
        }

        net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
        if (net_dev == NULL) {
                err = -ENOMEM;
                goto err_unmap_dma_io;
        }

        pci_set_drvdata(dev, net_dev);
        SET_NETDEV_DEV(net_dev, &dev->dev);

        net_dev->features = 0;
        net_dev->flags |= IFF_NOARP;

        net_dev->netdev_ops = &ec_bhf_netdev_ops;

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;
        priv->io = io;
        priv->dma_io = dma_io;
        priv->dev = dev;

        err = ec_bhf_setup_offsets(priv);
        if (err < 0)
                goto err_free_net_dev;

        memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);

        err = register_netdev(net_dev);
        if (err < 0)
                goto err_free_net_dev;

        return 0;

err_free_net_dev:
        free_netdev(net_dev);
err_unmap_dma_io:
        pci_iounmap(dev, dma_io);
err_unmap:
        pci_iounmap(dev, io);
err_release_regions:
        pci_release_regions(dev);
err_disable_dev:
        pci_clear_master(dev);
        pci_disable_device(dev);

        return err;
}

static void ec_bhf_remove(struct pci_dev *dev)
{
        struct net_device *net_dev = pci_get_drvdata(dev);
        struct ec_bhf_priv *priv = netdev_priv(net_dev);

        unregister_netdev(net_dev);
        free_netdev(net_dev);

        pci_iounmap(dev, priv->dma_io);
        pci_iounmap(dev, priv->io);
        pci_release_regions(dev);
        pci_clear_master(dev);
        pci_disable_device(dev);
}

static struct pci_driver pci_driver = {
        .name           = "ec_bhf",
        .id_table       = ids,
        .probe          = ec_bhf_probe,
        .remove         = ec_bhf_remove,
};
module_pci_driver(pci_driver);

module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");