linux/drivers/net/ethernet/altera/altera_sgdma.c
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
		      SGDMA_CTRLREG_INTEN;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
		      SGDMA_CTRLREG_INTEN |
		      SGDMA_CTRLREG_ILASTD;

	priv->sgdmadesclen = sizeof(struct sgdma_descrip);

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device,
					  (void __force *)priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device,
					  (void __force *)priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	/* Initialize descriptor memory to all 0's, sync memory to cache */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->rxdescmem, DMA_TO_DEVICE);

	return 0;
}
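
/* Editorial note on the mappings above: the rx descriptor region is mapped
 * DMA_BIDIRECTIONAL because the SGDMA writes completion status and
 * bytes_xferred back into it for the CPU to read (see the
 * dma_sync_single_for_cpu() call in sgdma_rx_status()); the tx descriptors
 * are only ever produced by the CPU, so DMA_TO_DEVICE suffices there.
 */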

void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	/* Initialize descriptor memory to 0 */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after they are initially enabled,
 * so the abstract enable and disable hooks below are intentionally
 * empty stubs.
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

/* Transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted, or 0 if the transmit is not currently possible.
 *
 * tx_lock is held by the caller.
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* address of packet to xmit */
			    0,				/* write addr 0 for tx dma */
			    buffer->len,		/* length of packet */
			    SGDMA_CONTROL_EOP,		/* Generate EOP */
			    0,				/* read fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}
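
/* A hedged sketch of how a caller holding tx_lock might consume the return
 * value above (illustrative only; the real caller lives in the TSE core
 * driver):
 *
 *	if (!sgdma_tx_buffer(priv, buffer)) {
 *		// DMA engine still busy: back off, e.g. stop the tx queue
 *		// and retry once a completion frees the engine
 *	}
 */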


/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;

	if (!sgdma_txbusy(priv) &&
	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}
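
/* Only one transmit is ever in flight (descbase[0] in sgdma_tx_buffer()),
 * so at most one completion can be reaped per call; hence the 0-or-1
 * return value.
 */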

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
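/* Worked example, derived from the packing below: a clean 64-byte frame
 * whose descriptor status carries only SGDMA_STATUS_EOP yields 0x00000040,
 * since EOP is masked off before the status is shifted into the upper half.
 * A caller can unpack with (rxstatus >> 16) and (rxstatus & 0xffff).
 */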
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;
		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					priv->sgdmadesclen,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* Clear control */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
			/* clear status */
			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* kick the rx sgdma after reaping this descriptor */
			sgdma_async_read(priv);

		} else {
			/* If the SGDMA indicated an end of packet on receive,
			 * the rxstatus from the descriptor is expected to be
			 * non-zero - meaning either a valid packet with a
			 * nonzero length, or an indicated error. If it is
			 * zero, all we can do is signal an error and return
			 * no packet received. Most likely there is a system
			 * design error, or an error in the underlying kernel
			 * (a cache or cache-management problem).
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		sgdma_async_read(priv);
	}

	return rxstatus;
}


/* Private functions */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */

	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
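
/* Note that the descriptor built above does not reach the engine until it
 * has been synced out with dma_sync_single_for_device() and the START bit
 * has been written in sgdma_async_read()/sgdma_async_write(), so the field
 * write order here is not ordering-critical; clearing HW_OWNED in the next
 * descriptor is what stops the engine after the current one.
 */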

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy with a completion status pending,
 * restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   priv->sgdmadesclen,
					   DMA_TO_DEVICE);

		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}
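
/* Only descbase[0] is ever armed here: the rx path keeps a single descriptor
 * in flight and re-arms it from sgdma_rx_status() after each completion, with
 * descbase[1] serving only as the not-hardware-owned terminator.
 */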

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->sgdmadesclen, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}
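
/* The two helpers above rebase a CPU (ioremapped) descriptor pointer onto
 * the bus address the DMA engine sees: the byte offset of the descriptor
 * within the mapped region is added to the region's bus base address. For
 * example, the second descriptor at tx_dma_desc + sizeof(struct
 * sgdma_descrip) translates to txdescmem_busaddr + sizeof(struct
 * sgdma_descrip).
 */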

#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member); \
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member); \
		}							\
	} while (0)
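
/* These are statement macros rather than functions so that list_entry() can
 * be handed the containing type and member name at the call site;
 * dequeue_tx() below is a typical use.
 */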

/* Adds a tse_buffer to the tail of the tx buffer list.
 * Assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* Adds a tse_buffer to the tail of the rx buffer list.
 * Assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* Dequeues a tse_buffer from the transmit buffer list; returns NULL
 * if the list is empty.
 * Assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* Dequeues a tse_buffer from the receive buffer list; returns NULL
 * if the list is empty.
 * Assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* Returns the head of the receive buffer list without removing it, or
 * NULL if the list is empty.
 * Assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* Check and return the rx sgdma status without polling.
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
		       & SGDMA_STSREG_BUSY;
}

/* Waits for the tx sgdma to finish its current operation; returns 0
 * when it transitions to non-busy, or 1 if the operation times out
 * (after roughly 100 microseconds of polling).
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for the current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}