linux/drivers/net/ethernet/altera/altera_sgdma.c
// SPDX-License-Identifier: GPL-2.0-only
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);
int sgdma_initialize(struct altera_tse_private *priv)
{
        priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
                      SGDMA_CTRLREG_INTEN;

        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
                      SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;

        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);

        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;

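        /* The rx descriptor region is mapped DMA_BIDIRECTIONAL since the
         * SGDMA writes completion status and bytes_xferred back into it
         * (read back in sgdma_rx_status()); the tx descriptor region is
         * only ever synced towards the device, so DMA_TO_DEVICE suffices.
         */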
        priv->rxdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping rx descriptor memory\n");
                return -EINVAL;
        }

        priv->txdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);

        if (dma_mapping_error(priv->device, priv->txdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping tx descriptor memory\n");
                return -EINVAL;
        }

        /* Initialize descriptor memory to all 0's, sync memory to cache */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);

        dma_sync_single_for_device(priv->device, priv->rxdescphys,
                                   priv->rxdescmem, DMA_TO_DEVICE);

        return 0;
}

void sgdma_uninitialize(struct altera_tse_private *priv)
{
        if (priv->rxdescphys)
                dma_unmap_single(priv->device, priv->rxdescphys,
                                 priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (priv->txdescphys)
                dma_unmap_single(priv->device, priv->txdescphys,
                                 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
        /* Initialize descriptor memory to 0 */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

        csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so no need to provide implementations for abstract enable
 * and disable
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

/* transmits buffer through SGDMA. Returns number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->tx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];

        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                return 0;

        sgdma_setup_descrip(cdesc,                      /* current descriptor */
                            ndesc,                      /* next descriptor */
                            sgdma_txphysaddr(priv, ndesc),
                            buffer->dma_addr,           /* address of packet to xmit */
                            0,                          /* write addr 0 for tx dma */
                            buffer->len,                /* length of packet */
                            SGDMA_CONTROL_EOP,          /* generate EOP */
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* write fixed */

        sgdma_async_write(priv, cdesc);

        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);

        return 1;
}


/* tx_lock held to protect access to queued tx list
 */
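/* Returns 1 when a queued transmit has completed: the tx SGDMA is idle,
 * hardware no longer owns the descriptor, and a buffer was removed from
 * the pending transmit list; returns 0 otherwise.
 */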
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
        u32 ready = 0;

        if (!sgdma_txbusy(priv) &&
            ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
             & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }

        return ready;
}

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
        sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
                       struct tse_buffer *rxbuffer)
{
        queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
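/* Illustrative sketch (not taken from this driver's callers) of how the
 * packed return value can be unpacked:
 *
 *      u32 rxstatus = sgdma_rx_status(priv);
 *      u16 pktlength = rxstatus & 0xffff;
 *      u16 pktstatus = rxstatus >> 16;
 */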
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *base =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
        struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
        unsigned int rxstatus = 0;

        u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
                unsigned int pktlength = 0;
                unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        SGDMA_DESC_LEN,
                                        DMA_FROM_DEVICE);

                pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
                pktstatus = csrrd8(desc, sgdma_descroffs(status));
                rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);

                if (rxstatus) {
                        csrwr8(0, desc, sgdma_descroffs(status));

                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
                                netdev_info(priv->dev,
                                            "sgdma rx and rx queue empty!\n");

                        /* Clear control */
                        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
                        csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

                        /* kick the rx sgdma after reaping this descriptor */
                        sgdma_async_read(priv);

                } else {
                        /* If the SGDMA indicated an end of packet on receive,
                         * then the rxstatus from the descriptor is expected
                         * to be non-zero - meaning a valid packet with a
                         * nonzero length, or an error has been indicated.
                         * If not, then all we can do is signal an error and
                         * return no packet received. Most likely there is a
                         * system design error, or an error in the underlying
                         * kernel (cache or cache management problem).
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
                                   sts, csrrd8(desc, sgdma_descroffs(status)),
                                   rxstatus);
                }
        } else if (sts == 0) {
                sgdma_async_read(priv);
        }

        return rxstatus;
}


/* Private functions */
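/* Fills in a single SGDMA descriptor: marks the following descriptor @ndesc
 * as not hardware-owned, then programs @desc with the read/write addresses,
 * the transfer length, the link to @ndesc_phys and the control bits
 * (hardware-owned plus the optional EOP and fixed-address flags), and clears
 * the status, burst and bytes_xferred fields.
 */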
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed)
{
        /* Clear the next descriptor as not owned by hardware */

        u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
        csrwr8(ctrl, ndesc, sgdma_descroffs(control));

        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;

        /* Channel is implicitly zero, initialized to 0 by default */
        csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
        csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

        csrwr32(0, desc, sgdma_descroffs(pad1));
        csrwr32(0, desc, sgdma_descroffs(pad2));
        csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

        csrwr8(ctrl, desc, sgdma_descroffs(control));
        csrwr8(0, desc, sgdma_descroffs(status));
        csrwr8(0, desc, sgdma_descroffs(wburst));
        csrwr8(0, desc, sgdma_descroffs(rburst));
        csrwr16(length, desc, sgdma_descroffs(bytes));
        csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning initial state - restart the async
 * read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a non-zero status, restart the
 * async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];
        struct tse_buffer *rxbuffer = NULL;

        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
                if (rxbuffer == NULL) {
                        netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
                }

                sgdma_setup_descrip(cdesc,              /* current descriptor */
                                    ndesc,              /* next descriptor */
                                    sgdma_rxphysaddr(priv, ndesc),
                                    0,                  /* read addr 0 for rx dma */
                                    rxbuffer->dma_addr, /* write addr for rx dma */
                                    0,                  /* read 'til EOP */
                                    0,                  /* EOP: NA for rx dma */
                                    0,                  /* read fixed: NA for rx dma */
                                    0);                 /* write fixed: NA for rx dma */

                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
                                           SGDMA_DESC_LEN,
                                           DMA_TO_DEVICE);

                csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                        priv->rx_dma_csr,
                        sgdma_csroffs(next_descrip));

                csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
                        priv->rx_dma_csr,
                        sgdma_csroffs(control));

                return 1;
        }

        return 0;
}

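/* Hands one transmit descriptor to the tx SGDMA: clears the control and
 * status CSRs, syncs the descriptor to the device, points the controller
 * at the descriptor's bus address and starts it. Returns 1 on success,
 * or 0 if the tx SGDMA is still busy.
 */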
static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip __iomem *desc)
{
        if (sgdma_txbusy(priv))
                return 0;

        /* clear control and status */
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   SGDMA_DESC_LEN, DMA_TO_DEVICE);

        csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                priv->tx_dma_csr,
                sgdma_csroffs(next_descrip));

        csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
                priv->tx_dma_csr,
                sgdma_csroffs(control));

        return 1;
}

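/* Translate a descriptor's CPU (ioremapped) address into the bus address
 * the SGDMA uses to fetch it: the descriptor's offset within the tx/rx
 * descriptor memory is added to that memory's bus base address.
 */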
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

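/* Small open-coded list helpers: list_remove_head() pops the first entry
 * off a list (leaving @entry NULL if the list is empty), and
 * list_peek_head() returns the first entry without removing it.
 */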
#define list_remove_head(list, entry, type, member)                     \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                        list_del_init(&entry->member);                  \
                }                                                       \
        } while (0)

#define list_peek_head(list, entry, type, member)                       \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                }                                                       \
        } while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of a rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
        return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
                       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
        int delay = 0;

        /* if DMA is busy, wait for current transaction to finish */
        while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
                & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);

        if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
            & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
        return 0;
}