linux/arch/powerpc/platforms/pasemi/dma_lib.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
	out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
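
/*
 * Usage note (editorial sketch, not part of the original driver): client
 * code normally goes through these accessors rather than raw MMIO. A typical
 * read-modify-write, mirroring what pasemi_dma_init() does below with the
 * common configuration register, looks like:
 *
 *	u32 cfg;
 *
 *	cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *	pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg | 0x18000000);
 *
 * The 0x18000000 value is simply the one used later in this file; any other
 * register or field from asm/pasemi_dma.h follows the same pattern.
 */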

static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}

static void pasemi_free_tx_chan(int chan)
{
	BUG_ON(test_bit(chan, txch_free));
	set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
	int bit;
retry:
	bit = find_first_bit(rxch_free, MAX_RXCH);
	if (bit >= MAX_RXCH)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, rxch_free))
		goto retry;

	return bit;
}

static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *              room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *          of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *          not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's a RX or TX channel, and in the case
 * of TX channels which group it needs to belong to (if any).
 *
 * Returns a pointer to the embedded struct pasemi_dmachan on success, NULL
 * on failure. The start of the full client allocation is available through
 * the priv member of the returned structure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
			    int total_size, int offset)
{
	void *buf;
	struct pasemi_dmachan *chan;
	int chno;

	BUG_ON(total_size < sizeof(struct pasemi_dmachan));

	buf = kzalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;
	chan = buf + offset;

	chan->priv = buf;

	switch (type & (TXCHAN|RXCHAN)) {
	case RXCHAN:
		chno = pasemi_alloc_rx_chan();
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL,
					       base_hw_irq + num_txch + chno);
		chan->status = &dma_status->rx_sta[chno];
		break;
	case TXCHAN:
		chno = pasemi_alloc_tx_chan(type);
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
		chan->status = &dma_status->tx_sta[chno];
		break;
	}

	chan->chan_type = type;

	return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
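
/*
 * Example usage of pasemi_dma_alloc_chan() (editorial sketch; struct my_chan
 * and its fields are hypothetical, not part of this driver):
 *
 *	struct my_chan {
 *		unsigned long flags;		// client-private state
 *		struct pasemi_dmachan chan;	// embedded channel
 *	};
 *
 *	struct pasemi_dmachan *chan;
 *	struct my_chan *priv;
 *
 *	chan = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_chan),
 *				     offsetof(struct my_chan, chan));
 *	if (!chan)
 *		return -ENOMEM;
 *	priv = container_of(chan, struct my_chan, chan);
 *
 * chan->priv points back at the start of the whole allocation, so
 * container_of() and chan->priv resolve to the same address here.
 */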

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
	if (chan->ring_virt)
		pasemi_dma_free_ring(chan);

	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
	case RXCHAN:
		pasemi_free_rx_chan(chan->chno);
		break;
	case TXCHAN:
		pasemi_free_tx_chan(chan->chno);
		break;
	}

	kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
	BUG_ON(chan->ring_virt);

	chan->ring_size = ring_size;

	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
					     ring_size * sizeof(u64),
					     &chan->ring_dma, GFP_KERNEL);

	if (!chan->ring_virt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
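
/*
 * Example (editorial sketch, with an assumed ring size): the size is counted
 * in 64-bit words, so the call below reserves 512 * 8 = 4096 bytes of
 * descriptor space:
 *
 *	int err;
 *
 *	err = pasemi_dma_alloc_ring(chan, 512);
 *	if (err)
 *		return err;
 *	// chan->ring_virt and chan->ring_dma now describe the ring
 */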

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
	BUG_ON(!chan->ring_virt);

	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
			  chan->ring_virt, chan->ring_dma);
	chan->ring_virt = NULL;
	chan->ring_size = 0;
	chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
	if (chan->chan_type == RXCHAN)
		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
	else
		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only tries for a short while to stop the channel; if it
 * does not stop in time, failure is returned.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
	int reg, retries;
	u32 sta;

	if (chan->chan_type == RXCHAN) {
		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	} else {
		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	}

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
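
/*
 * Example lifecycle (editorial sketch): after programming the channel's base
 * address and size registers, a client enables the channel and stops it
 * again before tearing the ring down:
 *
 *	pasemi_dma_start_chan(chan, 0);
 *	// ... submit and reap descriptors ...
 *	if (!pasemi_dma_stop_chan(chan))
 *		pr_err("DMA channel %d did not stop\n", chan->chno);
 *	pasemi_dma_free_chan(chan);
 *
 * Passing 0 as cmdsta just sets the enable bit; callers can OR in extra
 * CCMDSTA/TCMDSTA bits when they need them.
 */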

/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, *handle);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
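
/*
 * Example (editorial sketch, assumed 4096-byte size): allocating a coherent
 * buffer for the engine and releasing it again:
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = pasemi_dma_alloc_buf(chan, 4096, &handle);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... hand 'handle' to the hardware, use 'buf' from the CPU ...
 *	pasemi_dma_free_buf(chan, 4096, &handle);
 */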

/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
	int bit;

retry:
	bit = find_next_bit(flags_free, MAX_FLAGS, 0);
	if (bit >= MAX_FLAGS)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, flags_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);


/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	BUG_ON(test_bit(flag, flags_free));
	set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);


/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
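
/*
 * Example (editorial sketch): flags are allocated once and then set/cleared
 * around the work they synchronize:
 *
 *	int flag;
 *
 *	flag = pasemi_dma_alloc_flag();
 *	if (flag < 0)
 *		return flag;
 *	pasemi_dma_clear_flag(flag);
 *	// ... queue event descriptors that wait on or set this flag ...
 *	pasemi_dma_free_flag(flag);
 *
 * How set/clear interleave with the queued descriptors is entirely up to the
 * client; the calls above only show the API shape.
 */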

/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload.
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
	int bit;

retry:
	bit = find_next_bit(fun_free, MAX_FUN, 0);
	if (bit >= MAX_FUN)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, fun_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);


/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
	BUG_ON(fun >= MAX_FUN);
	BUG_ON(test_bit(fun, fun_free));
	set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);
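
/*
 * Example (editorial sketch): function engines follow the same allocate/free
 * pattern as flags:
 *
 *	int fun;
 *
 *	fun = pasemi_dma_alloc_fun();
 *	if (fun < 0)
 *		return fun;
 *	// ... point checksum/crypto descriptors at engine 'fun' ...
 *	pasemi_dma_free_fun(fun);
 */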


static void __iomem *map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;
fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		BUG();
		pr_warn("Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		BUG();
		pr_warn("Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = ioremap_cache(res.start, resource_size(&res));
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	for (i = 0; i < MAX_FLAGS; i++)
		__set_bit(i, flags_free);

	for (i = 0; i < MAX_FUN; i++)
		__set_bit(i, fun_free);

	/* clear all status flags */
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

	pr_info("PA Semi PWRficient DMA library initialized "
		"(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
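
/*
 * Example (editorial sketch; the probe function is hypothetical): client
 * drivers call pasemi_dma_init() once, early in probe, before any other
 * function in this library:
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pasemi_dma_init();
 *		if (err)
 *			return err;
 *		// channels, rings, flags and buffers may be used from here on
 *		...
 *	}
 *
 * Repeated calls are harmless: the dma_pdev check under init_lock turns
 * later calls into no-ops that return 0.
 */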