uboot/drivers/net/xilinx_ll_temac_sdma.c
/*
 * Xilinx xps_ll_temac ethernet driver for u-boot
 *
 * SDMA sub-controller
 *
 * Copyright (C) 2011 - 2012 Stephan Linz <linz@li-pro.net>
 * Copyright (C) 2008 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008 - 2011 PetaLogix
 *
 * Based on Yoshio Kashiwagi kashiwagi@co-nss.co.jp driver
 * Copyright (C) 2008 Nissin Systems Co.,Ltd.
 * March 2008 created
 *
 * CREDITS: tsec driver
 *
 * SPDX-License-Identifier:     GPL-2.0+
 *
 * [0]: http://www.xilinx.com/support/documentation
 *
 * [M]: [0]/ip_documentation/mpmc.pdf
 * [S]: [0]/ip_documentation/xps_ll_temac.pdf
 * [A]: [0]/application_notes/xapp1041.pdf
 */

#include <config.h>
#include <common.h>
#include <net.h>

#include <asm/types.h>
#include <asm/io.h>

#include "xilinx_ll_temac.h"
#include "xilinx_ll_temac_sdma.h"

#define TX_BUF_CNT              2

static unsigned int rx_idx;     /* index of the current RX buffer */
static unsigned int tx_idx;     /* index of the current TX buffer */

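/* Rx and Tx CDMAC buffer descriptors used by this driver */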
struct rtx_cdmac_bd {
        struct cdmac_bd rx[PKTBUFSRX];
        struct cdmac_bd tx[TX_BUF_CNT];
};

/*
 * DMA Buffer Descriptor alignment
 *
 * If the address contained in the Next Descriptor Pointer register is not
 * 8-word aligned or reaches beyond the range of available memory, the SDMA
 * halts processing and sets the CDMAC_BD_STCTRL_ERROR bit in the respective
 * status register (tx_chnl_sts or rx_chnl_sts).
 *
 * [1]: [0]/ip_documentation/mpmc.pdf
 *      page 161, Next Descriptor Pointer
 */
static struct rtx_cdmac_bd cdmac_bd __aligned(32);

#if defined(CONFIG_XILINX_440) || defined(CONFIG_XILINX_405)

/*
 * Indirect DCR access operations mi{ft}dcr_xilinx(), especially
 * for Xilinx PowerPC implementations on FPGA.
 *
 * FIXME: This part should go up to arch/powerpc -- but where?
 */
#include <asm/processor.h>
#define XILINX_INDIRECT_DCR_ADDRESS_REG 0
#define XILINX_INDIRECT_DCR_ACCESS_REG  1
inline unsigned mifdcr_xilinx(const unsigned dcrn)
{
        mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
        return mfdcr(XILINX_INDIRECT_DCR_ACCESS_REG);
}
inline void mitdcr_xilinx(const unsigned dcrn, int val)
{
        mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
        mtdcr(XILINX_INDIRECT_DCR_ACCESS_REG, val);
}

/* Xilinx Device Control Register (DCR) in/out accessors */
inline unsigned ll_temac_xldcr_in32(phys_addr_t addr)
{
        return mifdcr_xilinx((const unsigned)addr);
}
inline void ll_temac_xldcr_out32(phys_addr_t addr, unsigned value)
{
        mitdcr_xilinx((const unsigned)addr, value);
}

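/* collect all register addresses for Xilinx DCR in/out accessors */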
void ll_temac_collect_xldcr_sdma_reg_addr(struct eth_device *dev)
{
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t dmac_ctrl = ll_temac->ctrladdr;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        ra[TX_NXTDESC_PTR]   = dmac_ctrl + TX_NXTDESC_PTR;
        ra[TX_CURBUF_ADDR]   = dmac_ctrl + TX_CURBUF_ADDR;
        ra[TX_CURBUF_LENGTH] = dmac_ctrl + TX_CURBUF_LENGTH;
        ra[TX_CURDESC_PTR]   = dmac_ctrl + TX_CURDESC_PTR;
        ra[TX_TAILDESC_PTR]  = dmac_ctrl + TX_TAILDESC_PTR;
        ra[TX_CHNL_CTRL]     = dmac_ctrl + TX_CHNL_CTRL;
        ra[TX_IRQ_REG]       = dmac_ctrl + TX_IRQ_REG;
        ra[TX_CHNL_STS]      = dmac_ctrl + TX_CHNL_STS;
        ra[RX_NXTDESC_PTR]   = dmac_ctrl + RX_NXTDESC_PTR;
        ra[RX_CURBUF_ADDR]   = dmac_ctrl + RX_CURBUF_ADDR;
        ra[RX_CURBUF_LENGTH] = dmac_ctrl + RX_CURBUF_LENGTH;
        ra[RX_CURDESC_PTR]   = dmac_ctrl + RX_CURDESC_PTR;
        ra[RX_TAILDESC_PTR]  = dmac_ctrl + RX_TAILDESC_PTR;
        ra[RX_CHNL_CTRL]     = dmac_ctrl + RX_CHNL_CTRL;
        ra[RX_IRQ_REG]       = dmac_ctrl + RX_IRQ_REG;
        ra[RX_CHNL_STS]      = dmac_ctrl + RX_CHNL_STS;
        ra[DMA_CONTROL_REG]  = dmac_ctrl + DMA_CONTROL_REG;
}

#endif /* CONFIG_XILINX_440 || CONFIG_XILINX_405 */

/* Xilinx Processor Local Bus (PLB) in/out accessors */
inline unsigned ll_temac_xlplb_in32(phys_addr_t addr)
{
        return in_be32((void *)addr);
}
inline void ll_temac_xlplb_out32(phys_addr_t addr, unsigned value)
{
        out_be32((void *)addr, value);
}

/* collect all register addresses for Xilinx PLB in/out accessors */
void ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device *dev)
{
        struct ll_temac *ll_temac = dev->priv;
        struct sdma_ctrl *sdma_ctrl = (void *)ll_temac->ctrladdr;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        ra[TX_NXTDESC_PTR]   = (phys_addr_t)&sdma_ctrl->tx_nxtdesc_ptr;
        ra[TX_CURBUF_ADDR]   = (phys_addr_t)&sdma_ctrl->tx_curbuf_addr;
        ra[TX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->tx_curbuf_length;
        ra[TX_CURDESC_PTR]   = (phys_addr_t)&sdma_ctrl->tx_curdesc_ptr;
        ra[TX_TAILDESC_PTR]  = (phys_addr_t)&sdma_ctrl->tx_taildesc_ptr;
        ra[TX_CHNL_CTRL]     = (phys_addr_t)&sdma_ctrl->tx_chnl_ctrl;
        ra[TX_IRQ_REG]       = (phys_addr_t)&sdma_ctrl->tx_irq_reg;
        ra[TX_CHNL_STS]      = (phys_addr_t)&sdma_ctrl->tx_chnl_sts;
        ra[RX_NXTDESC_PTR]   = (phys_addr_t)&sdma_ctrl->rx_nxtdesc_ptr;
        ra[RX_CURBUF_ADDR]   = (phys_addr_t)&sdma_ctrl->rx_curbuf_addr;
        ra[RX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->rx_curbuf_length;
        ra[RX_CURDESC_PTR]   = (phys_addr_t)&sdma_ctrl->rx_curdesc_ptr;
        ra[RX_TAILDESC_PTR]  = (phys_addr_t)&sdma_ctrl->rx_taildesc_ptr;
        ra[RX_CHNL_CTRL]     = (phys_addr_t)&sdma_ctrl->rx_chnl_ctrl;
        ra[RX_IRQ_REG]       = (phys_addr_t)&sdma_ctrl->rx_irq_reg;
        ra[RX_CHNL_STS]      = (phys_addr_t)&sdma_ctrl->rx_chnl_sts;
        ra[DMA_CONTROL_REG]  = (phys_addr_t)&sdma_ctrl->dma_control_reg;
}
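
/*
 * The in/out accessors above are not called directly; the driver goes
 * through the function pointers in struct ll_temac.  A minimal sketch of
 * the expected wiring, assuming the board/bus setup code selects the PLB
 * variant (DCR-based systems would use the ll_temac_xldcr_* helpers and
 * ll_temac_collect_xldcr_sdma_reg_addr() instead):
 *
 *      ll_temac->in32  = ll_temac_xlplb_in32;
 *      ll_temac->out32 = ll_temac_xlplb_out32;
 *      ll_temac_collect_xlplb_sdma_reg_addr(dev);
 */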

/* Check for TX and RX channel errors. */
static inline int ll_temac_sdma_error(struct eth_device *dev)
{
        int err;
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        err = ll_temac->in32(ra[TX_CHNL_STS]) & CHNL_STS_ERROR;
        err |= ll_temac->in32(ra[RX_CHNL_STS]) & CHNL_STS_ERROR;

        return err;
}

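/* Set up the Rx and Tx buffer descriptors and start the Rx channel. */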
int ll_temac_init_sdma(struct eth_device *dev)
{
        struct ll_temac *ll_temac = dev->priv;
        struct cdmac_bd *rx_dp;
        struct cdmac_bd *tx_dp;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;
        int i;

        printf("%s: SDMA: %d Rx buffers, %d Tx buffers\n",
                        dev->name, PKTBUFSRX, TX_BUF_CNT);

        /* Initialize the Rx Buffer descriptors */
        for (i = 0; i < PKTBUFSRX; i++) {
                rx_dp = &cdmac_bd.rx[i];
                memset(rx_dp, 0, sizeof(*rx_dp));
                rx_dp->next_p = rx_dp;
                rx_dp->buf_len = PKTSIZE_ALIGN;
                rx_dp->phys_buf_p = (u8 *)net_rx_packets[i];
                flush_cache((u32)rx_dp->phys_buf_p, PKTSIZE_ALIGN);
        }
        flush_cache((u32)cdmac_bd.rx, sizeof(cdmac_bd.rx));

        /* Initialize the TX Buffer Descriptors */
        for (i = 0; i < TX_BUF_CNT; i++) {
                tx_dp = &cdmac_bd.tx[i];
                memset(tx_dp, 0, sizeof(*tx_dp));
                tx_dp->next_p = tx_dp;
        }
        flush_cache((u32)cdmac_bd.tx, sizeof(cdmac_bd.tx));

        /* Reset the index counters for the Rx and Tx buffer descriptors */
        rx_idx = tx_idx = 0;

        /* initial Rx DMA start by writing to respective TAILDESC_PTR */
        ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
        ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);

        return 0;
}

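/* Soft reset the DMA engine and wait for the reset to complete. */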
int ll_temac_halt_sdma(struct eth_device *dev)
{
        unsigned timeout = 50;  /* 1usec * 50 = 50usec */
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        /*
         * Soft reset the DMA
         *
         * Quote from MPMC documentation: Writing a 1 to this field
         * forces the DMA engine to shutdown and reset itself. After
         * setting this bit, software must poll it until the bit is
         * cleared by the DMA. This indicates that the reset process
         * is done and the pipeline has been flushed.
         */
        ll_temac->out32(ra[DMA_CONTROL_REG], DMA_CONTROL_RESET);
        while (timeout && (ll_temac->in32(ra[DMA_CONTROL_REG])
                                        & DMA_CONTROL_RESET)) {
                timeout--;
                udelay(1);
        }

        if (!timeout) {
                printf("%s: Timeout\n", __func__);
                return -1;
        }

        return 0;
}

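/*
 * Reset the DMA engine, disable and acknowledge its interrupts, and
 * re-enable tail pointer mode.
 */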
int ll_temac_reset_sdma(struct eth_device *dev)
{
        u32 r;
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        /* Soft reset the DMA.  */
        if (ll_temac_halt_sdma(dev))
                return -1;

        /* Now clear the interrupts.  */
        r = ll_temac->in32(ra[TX_CHNL_CTRL]);
        r &= ~CHNL_CTRL_IRQ_MASK;
        ll_temac->out32(ra[TX_CHNL_CTRL], r);

        r = ll_temac->in32(ra[RX_CHNL_CTRL]);
        r &= ~CHNL_CTRL_IRQ_MASK;
        ll_temac->out32(ra[RX_CHNL_CTRL], r);

        /* Now ACK pending IRQs.  */
        ll_temac->out32(ra[TX_IRQ_REG], IRQ_REG_IRQ_MASK);
        ll_temac->out32(ra[RX_IRQ_REG], IRQ_REG_IRQ_MASK);

        /* Set tail-ptr mode, disable errors for both channels.  */
        ll_temac->out32(ra[DMA_CONTROL_REG],
                        /* Enable use of tail pointer register */
                        DMA_CONTROL_TPE |
                        /* Disable error on Rx 2/4-bit coalesce counter overflow */
                        DMA_CONTROL_RXOCEID |
                        /* Disable error on Tx 2/4-bit coalesce counter overflow */
                        DMA_CONTROL_TXOCEID);

        return 0;
}

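/* Receive handler: pass on one completed Rx frame, if available. */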
int ll_temac_recv_sdma(struct eth_device *dev)
{
        int length, pb_idx;
        struct cdmac_bd *rx_dp = &cdmac_bd.rx[rx_idx];
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        if (ll_temac_sdma_error(dev)) {

                if (ll_temac_reset_sdma(dev))
                        return -1;

                ll_temac_init_sdma(dev);
        }

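        /* make sure we read the descriptor status updated by the DMA engine */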
        flush_cache((u32)rx_dp, sizeof(*rx_dp));

        if (!(rx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED))
                return 0;

        if (rx_dp->sca.stctrl & (CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP)) {
                pb_idx = rx_idx;
                length = rx_dp->sca.app[4] & CDMAC_BD_APP4_RXBYTECNT_MASK;
        } else {
                pb_idx = -1;
                length = 0;
                printf("%s: Got part of packet, unsupported (%x)\n",
                                __func__, rx_dp->sca.stctrl);
        }

        /* make the DMA-written packet data visible to the CPU */
        flush_cache((u32)rx_dp->phys_buf_p, length);

        /* reset the current descriptor */
        rx_dp->sca.stctrl = 0;
        rx_dp->sca.app[4] = 0;
        flush_cache((u32)rx_dp, sizeof(*rx_dp));

        /* Find next empty buffer descriptor, preparation for next iteration */
        rx_idx = (rx_idx + 1) % PKTBUFSRX;
        rx_dp = &cdmac_bd.rx[rx_idx];
        flush_cache((u32)rx_dp, sizeof(*rx_dp));

        /* DMA start by writing to respective TAILDESC_PTR */
        ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
        ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);

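        /* hand the received frame to the network stack */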
        if (length > 0 && pb_idx != -1)
                net_process_received_packet(net_rx_packets[pb_idx], length);

        return 0;
}

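/* Send one frame through the next free Tx buffer descriptor. */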
int ll_temac_send_sdma(struct eth_device *dev, void *packet, int length)
{
        unsigned timeout = 50;  /* 1usec * 50 = 50usec */
        struct cdmac_bd *tx_dp = &cdmac_bd.tx[tx_idx];
        struct ll_temac *ll_temac = dev->priv;
        phys_addr_t *ra = ll_temac->sdma_reg_addr;

        if (ll_temac_sdma_error(dev)) {

                if (ll_temac_reset_sdma(dev))
                        return -1;

                ll_temac_init_sdma(dev);
        }

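        /* fill the Tx descriptor; SOP and EOP mark a single-buffer frame */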
        tx_dp->phys_buf_p = (u8 *)packet;
        tx_dp->buf_len = length;
        tx_dp->sca.stctrl = CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP |
                        CDMAC_BD_STCTRL_STOP_ON_END;

        flush_cache((u32)packet, length);
        flush_cache((u32)tx_dp, sizeof(*tx_dp));

        /* DMA start by writing to respective TAILDESC_PTR */
        ll_temac->out32(ra[TX_CURDESC_PTR], (int)tx_dp);
        ll_temac->out32(ra[TX_TAILDESC_PTR], (int)tx_dp);

        /* Find next empty buffer descriptor, preparation for next iteration */
        tx_idx = (tx_idx + 1) % TX_BUF_CNT;
        tx_dp = &cdmac_bd.tx[tx_idx];

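        /*
         * Re-read the descriptor from memory until the DMA engine marks it
         * completed or the timeout expires.
         */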
        do {
                flush_cache((u32)tx_dp, sizeof(*tx_dp));
                udelay(1);
        } while (timeout-- && !(tx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED));

        if (!timeout) {
                printf("%s: Timeout\n", __func__);
                return -1;
        }

        return 0;
}