linux/arch/arm/mach-rpc/dma.c
/*
 *  linux/arch/arm/mach-rpc/dma.c
 *
 *  Copyright (C) 1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA functions specific to RiscPC architecture
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/page.h>
#include <asm/dma.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/uaccess.h>

#include <asm/mach/dma.h>
#include <asm/hardware/iomd.h>

struct iomd_dma {
        struct dma_struct       dma;
        unsigned int            state;
        unsigned long           base;           /* Controller base address */
        int                     irq;            /* Controller IRQ */
        struct scatterlist      cur_sg;         /* Current controller buffer */
        dma_addr_t              dma_addr;
        unsigned int            dma_len;
};

#if 0
typedef enum {
        dma_size_8      = 1,
        dma_size_16     = 2,
        dma_size_32     = 4,
        dma_size_128    = 16
} dma_size_t;
#endif

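/*
 * Transfer size value programmed into the IOMD channel control register:
 * 2 selects 16-bit transfers (compare the dma_size_t values above).
 */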
#define TRANSFER_SIZE   2

#define CURA    (0)
#define ENDA    (IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB    (IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB    (IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR      (IOMD_IO0CR - IOMD_IO0CURA)
#define ST      (IOMD_IO0ST - IOMD_IO0CURA)

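/*
 * Fill @sg with the next chunk of the current transfer, never letting a
 * chunk cross a page boundary.  The DMA_END_L/DMA_END_S status bits are
 * or'ed into the top of the returned length (which is what gets written
 * to the channel's END register); DMA_END_S is set once the whole
 * scatterlist has been consumed.
 */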
static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
{
        unsigned long end, offset, flags = 0;

        if (idma->dma.sg) {
                sg->dma_address = idma->dma_addr;
                offset = sg->dma_address & ~PAGE_MASK;

                end = offset + idma->dma_len;

                if (end > PAGE_SIZE)
                        end = PAGE_SIZE;

                if (offset + TRANSFER_SIZE >= end)
                        flags |= DMA_END_L;

                sg->length = end - TRANSFER_SIZE;

                idma->dma_len -= end - offset;
                idma->dma_addr += end - offset;

                if (idma->dma_len == 0) {
                        if (idma->dma.sgcount > 1) {
                                idma->dma.sg = sg_next(idma->dma.sg);
                                idma->dma_addr = idma->dma.sg->dma_address;
                                idma->dma_len = idma->dma.sg->length;
                                idma->dma.sgcount--;
                        } else {
                                idma->dma.sg = NULL;
                                flags |= DMA_END_S;
                        }
                }
        } else {
                flags = DMA_END_S | DMA_END_L;
                sg->dma_address = 0;
                sg->length = 0;
        }

        sg->length |= flags;
}

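/*
 * Interrupt handler.  The IOMD double-buffers each channel (buffer A and
 * buffer B): while one buffer drains, the other is reprogrammed with the
 * next chunk.  DMA_ST_AB in the status register indicates which buffer
 * the controller is currently using; once the final chunk has been
 * handed over the IRQ is disabled until the channel is re-enabled.
 */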
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
        struct iomd_dma *idma = dev_id;
        unsigned long base = idma->base;

        do {
                unsigned int status;

                status = iomd_readb(base + ST);
                if (!(status & DMA_ST_INT))
                        return IRQ_HANDLED;

                if ((idma->state ^ status) & DMA_ST_AB)
                        iomd_get_next_sg(&idma->cur_sg, idma);

                switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
                case DMA_ST_OFL:                        /* OIA */
                case DMA_ST_AB:                         /* .IB */
                        iomd_writel(idma->cur_sg.dma_address, base + CURA);
                        iomd_writel(idma->cur_sg.length, base + ENDA);
                        idma->state = DMA_ST_AB;
                        break;

                case DMA_ST_OFL | DMA_ST_AB:            /* OIB */
                case 0:                                 /* .IA */
                        iomd_writel(idma->cur_sg.dma_address, base + CURB);
                        iomd_writel(idma->cur_sg.length, base + ENDB);
                        idma->state = 0;
                        break;
                }

                if (status & DMA_ST_OFL &&
                    idma->cur_sg.length == (DMA_END_S|DMA_END_L))
                        break;
        } while (1);

        idma->state = ~DMA_ST_AB;
        disable_irq(irq);

        return IRQ_HANDLED;
}

static int iomd_request_dma(unsigned int chan, dma_t *dma)
{
        struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);

        return request_irq(idma->irq, iomd_dma_handle,
                           IRQF_DISABLED, idma->dma.device_id, idma);
}

static void iomd_free_dma(unsigned int chan, dma_t *dma)
{
        struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);

        free_irq(idma->irq, idma);
}

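/*
 * (Re)start a transfer.  When the channel has been reprogrammed
 * (dma.invalid), ISA-style callers that supplied only a flat buffer get
 * a one-entry scatterlist built for them and the buffer mapped for DMA;
 * the channel is then reset via DMA_CR_C and its state primed for the
 * interrupt handler before the channel and its IRQ are enabled.
 */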
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
        struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
        unsigned long dma_base = idma->base;
        unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

        if (idma->dma.invalid) {
                idma->dma.invalid = 0;

                /*
                 * Cope with ISA-style drivers which expect cache
                 * coherence.
                 */
                if (!idma->dma.sg) {
                        idma->dma.sg = &idma->dma.buf;
                        idma->dma.sgcount = 1;
                        idma->dma.buf.length = idma->dma.count;
                        idma->dma.buf.dma_address = dma_map_single(NULL,
                                idma->dma.addr, idma->dma.count,
                                idma->dma.dma_mode == DMA_MODE_READ ?
                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
                }

                iomd_writeb(DMA_CR_C, dma_base + CR);
                idma->state = DMA_ST_AB;
        }

        if (idma->dma.dma_mode == DMA_MODE_READ)
                ctrl |= DMA_CR_D;

        iomd_writeb(ctrl, dma_base + CR);
        enable_irq(idma->irq);
}

static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
        struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
        unsigned long dma_base = idma->base;
        unsigned long flags;

        local_irq_save(flags);
        if (idma->state != ~DMA_ST_AB)
                disable_irq(idma->irq);
        iomd_writeb(0, dma_base + CR);
        local_irq_restore(flags);
}

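/*
 * Convert a requested cycle time (nanoseconds) into the 2-bit per-channel
 * speed field of the IOMD DMA timing control register: shorter cycles map
 * to higher field values, and each of channels 0-3 occupies two bits of
 * IOMD_DMATCR.
 */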
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
        int tcr, speed;

        if (cycle < 188)
                speed = 3;
        else if (cycle <= 250)
                speed = 2;
        else if (cycle < 438)
                speed = 1;
        else
                speed = 0;

        tcr = iomd_readb(IOMD_DMATCR);
        speed &= 3;

        switch (chan) {
        case DMA_0:
                tcr = (tcr & ~0x03) | speed;
                break;

        case DMA_1:
                tcr = (tcr & ~0x0c) | (speed << 2);
                break;

        case DMA_2:
                tcr = (tcr & ~0x30) | (speed << 4);
                break;

        case DMA_3:
                tcr = (tcr & ~0xc0) | (speed << 6);
                break;

        default:
                break;
        }

        iomd_writeb(tcr, IOMD_DMATCR);

        return speed;
}

static struct dma_ops iomd_dma_ops = {
        .type           = "IOMD",
        .request        = iomd_request_dma,
        .free           = iomd_free_dma,
        .enable         = iomd_enable_dma,
        .disable        = iomd_disable_dma,
        .setspeed       = iomd_set_dma_speed,
};
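
/*
 * Client drivers do not call these hooks directly: they go through the
 * generic ARM ISA-style DMA calls declared in <asm/dma.h>, which the DMA
 * core dispatches to the dma_ops registered here.  Roughly (an
 * illustrative sketch only; "buf" and "len" are placeholders):
 *
 *      if (request_dma(DMA_0, "mydev") == 0) {
 *              set_dma_mode(DMA_0, DMA_MODE_READ);
 *              set_dma_addr(DMA_0, buf);
 *              set_dma_count(DMA_0, len);
 *              enable_dma(DMA_0);
 *              ...
 *              disable_dma(DMA_0);
 *              free_dma(DMA_0);
 *      }
 */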

static struct fiq_handler fh = {
        .name   = "floppydma"
};

struct floppy_dma {
        struct dma_struct       dma;
        unsigned int            fiq;
};

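/*
 * Floppy "DMA" is done entirely by a FIQ handler (floppy_fiqin /
 * floppy_fiqout, hand-coded in assembler).  For the duration of the
 * transfer r9 holds the remaining byte count, r10 the buffer address and
 * fp the FLOPPYDMA_BASE I/O address.
 */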
static void floppy_enable_dma(unsigned int chan, dma_t *dma)
{
        struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
        void *fiqhandler_start;
        unsigned int fiqhandler_length;
        struct pt_regs regs;

        if (fdma->dma.sg)
                BUG();

        if (fdma->dma.dma_mode == DMA_MODE_READ) {
                extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
                fiqhandler_start = &floppy_fiqin_start;
                fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
        } else {
                extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
                fiqhandler_start = &floppy_fiqout_start;
                fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
        }

        regs.ARM_r9  = fdma->dma.count;
        regs.ARM_r10 = (unsigned long)fdma->dma.addr;
        regs.ARM_fp  = (unsigned long)FLOPPYDMA_BASE;

        if (claim_fiq(&fh)) {
                printk("floppydma: couldn't claim FIQ.\n");
                return;
        }

        set_fiq_handler(fiqhandler_start, fiqhandler_length);
        set_fiq_regs(&regs);
        enable_fiq(fdma->fiq);
}

static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
        struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
        disable_fiq(fdma->fiq);
        release_fiq(&fh);
}

static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
        struct pt_regs regs;
        get_fiq_regs(&regs);
        return regs.ARM_r9;
}

static struct dma_ops floppy_dma_ops = {
        .type           = "FIQDMA",
        .enable         = floppy_enable_dma,
        .disable        = floppy_disable_dma,
        .residue        = floppy_get_residue,
};

/*
 * This is virtual DMA - we don't need anything here.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}

static struct dma_ops sound_dma_ops = {
        .type           = "VIRTUAL",
        .enable         = sound_enable_disable_dma,
        .disable        = sound_enable_disable_dma,
};

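/*
 * The four general purpose I/O channels (DMA_0..DMA_3) plus the two
 * sound channels (DMA_S0, DMA_S1).
 */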
static struct iomd_dma iomd_dma[6];

static struct floppy_dma floppy_dma = {
        .dma            = {
                .d_ops  = &floppy_dma_ops,
        },
        .fiq            = FIQ_FLOPPYDATA,
};

static dma_t sound_dma = {
        .d_ops          = &sound_dma_ops,
};

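/*
 * Disable the four I/O channels, set a default timing value, route
 * channels 2 and 3 to the expansion (podule) bus, then register every
 * channel with the generic ARM DMA layer.
 */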
static int __init rpc_dma_init(void)
{
        unsigned int i;
        int ret;

        iomd_writeb(0, IOMD_IO0CR);
        iomd_writeb(0, IOMD_IO1CR);
        iomd_writeb(0, IOMD_IO2CR);
        iomd_writeb(0, IOMD_IO3CR);

        iomd_writeb(0xa0, IOMD_DMATCR);

        /*
         * Setup DMA channels 2,3 to be for podules
         * and channels 0,1 for internal devices
         */
        iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);

        iomd_dma[DMA_0].base    = IOMD_IO0CURA;
        iomd_dma[DMA_0].irq     = IRQ_DMA0;
        iomd_dma[DMA_1].base    = IOMD_IO1CURA;
        iomd_dma[DMA_1].irq     = IRQ_DMA1;
        iomd_dma[DMA_2].base    = IOMD_IO2CURA;
        iomd_dma[DMA_2].irq     = IRQ_DMA2;
        iomd_dma[DMA_3].base    = IOMD_IO3CURA;
        iomd_dma[DMA_3].irq     = IRQ_DMA3;
        iomd_dma[DMA_S0].base   = IOMD_SD0CURA;
        iomd_dma[DMA_S0].irq    = IRQ_DMAS0;
        iomd_dma[DMA_S1].base   = IOMD_SD1CURA;
        iomd_dma[DMA_S1].irq    = IRQ_DMAS1;

        for (i = DMA_0; i <= DMA_S1; i++) {
                iomd_dma[i].dma.d_ops = &iomd_dma_ops;

                ret = isa_dma_add(i, &iomd_dma[i].dma);
                if (ret)
                        printk("IOMDDMA%u: unable to register: %d\n", i, ret);
        }

        ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
        if (ret)
                printk("IOMDFLOPPY: unable to register: %d\n", ret);
        ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
        if (ret)
                printk("IOMDSOUND: unable to register: %d\n", ret);
        return 0;
}
core_initcall(rpc_dma_init);