/* qemu/hw/dma/pl080.c */
/*
 * Arm PrimeCell PL080/PL081 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "qemu/log.h"

#define PL080_MAX_CHANNELS 8
/* DMACConfiguration register bits.  */
#define PL080_CONF_E    0x1     /* controller enable */
#define PL080_CONF_M1   0x2     /* AHB master 1 endianness select */
#define PL080_CONF_M2   0x4     /* AHB master 2 endianness select */

/* Per-channel configuration register bits.  */
#define PL080_CCONF_H   0x40000 /* halt: channel ignores further requests */
#define PL080_CCONF_A   0x20000 /* active (unused by this model) */
#define PL080_CCONF_L   0x10000 /* lock (unused by this model) */
#define PL080_CCONF_ITC 0x08000 /* terminal count interrupt unmask */
#define PL080_CCONF_IE  0x04000 /* error interrupt unmask */
#define PL080_CCONF_E   0x00001 /* channel enable */

/* Per-channel control register bits.  */
#define PL080_CCTRL_I   0x80000000 /* raise terminal count interrupt */
#define PL080_CCTRL_DI  0x08000000 /* increment destination address */
#define PL080_CCTRL_SI  0x04000000 /* increment source address */
#define PL080_CCTRL_D   0x02000000 /* destination AHB master select */
#define PL080_CCTRL_S   0x01000000 /* source AHB master select */
  31
/* Register state for a single DMA channel.  */
typedef struct {
    uint32_t src;   /* source address */
    uint32_t dest;  /* destination address */
    uint32_t lli;   /* linked list item pointer (0 = end of list) */
    uint32_t ctrl;  /* control: widths, increments, transfer size */
    uint32_t conf;  /* configuration: enable, halt, flow control, ids */
} pl080_channel;
  39
#define TYPE_PL080 "pl080"
#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)

/* Device state shared by the PL080 (8 channels) and PL081 (2 channels). */
typedef struct PL080State {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    uint8_t tc_int;      /* raw terminal-count interrupt status, one bit per channel */
    uint8_t tc_mask;     /* terminal-count mask, derived from PL080_CCONF_ITC */
    uint8_t err_int;     /* raw error interrupt status */
    uint8_t err_mask;    /* error mask, derived from PL080_CCONF_IE */
    uint32_t conf;       /* DMACConfiguration register */
    uint32_t sync;       /* DMACSync register */
    uint32_t req_single; /* pending single-transfer peripheral requests */
    uint32_t req_burst;  /* pending burst-transfer peripheral requests */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;       /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;        /* combined interrupt output */
} PL080State;
  61
/* Migration state for one DMA channel; mirrors pl080_channel exactly.  */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
  75
  76static const VMStateDescription vmstate_pl080 = {
  77    .name = "pl080",
  78    .version_id = 1,
  79    .minimum_version_id = 1,
  80    .fields = (VMStateField[]) {
  81        VMSTATE_UINT8(tc_int, PL080State),
  82        VMSTATE_UINT8(tc_mask, PL080State),
  83        VMSTATE_UINT8(err_int, PL080State),
  84        VMSTATE_UINT8(err_mask, PL080State),
  85        VMSTATE_UINT32(conf, PL080State),
  86        VMSTATE_UINT32(sync, PL080State),
  87        VMSTATE_UINT32(req_single, PL080State),
  88        VMSTATE_UINT32(req_burst, PL080State),
  89        VMSTATE_UINT8(tc_int, PL080State),
  90        VMSTATE_UINT8(tc_int, PL080State),
  91        VMSTATE_UINT8(tc_int, PL080State),
  92        VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
  93                             1, vmstate_pl080_channel, pl080_channel),
  94        VMSTATE_INT32(running, PL080State),
  95        VMSTATE_END_OF_LIST()
  96    }
  97};
  98
/* PrimeCell peripheral/cell ID register bytes (offsets 0xfe0..0xfff,
   one byte per 32-bit word).  The two tables differ only in the low
   byte of the part number: 0x080 vs 0x081.  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
 104
 105static void pl080_update(PL080State *s)
 106{
 107    if ((s->tc_int & s->tc_mask)
 108            || (s->err_int & s->err_mask))
 109        qemu_irq_raise(s->irq);
 110    else
 111        qemu_irq_lower(s->irq);
 112}
 113
 114static void pl080_run(PL080State *s)
 115{
 116    int c;
 117    int flow;
 118    pl080_channel *ch;
 119    int swidth;
 120    int dwidth;
 121    int xsize;
 122    int n;
 123    int src_id;
 124    int dest_id;
 125    int size;
 126    uint8_t buff[4];
 127    uint32_t req;
 128
 129    s->tc_mask = 0;
 130    for (c = 0; c < s->nchannels; c++) {
 131        if (s->chan[c].conf & PL080_CCONF_ITC)
 132            s->tc_mask |= 1 << c;
 133        if (s->chan[c].conf & PL080_CCONF_IE)
 134            s->err_mask |= 1 << c;
 135    }
 136
 137    if ((s->conf & PL080_CONF_E) == 0)
 138        return;
 139
 140hw_error("DMA active\n");
 141    /* If we are already in the middle of a DMA operation then indicate that
 142       there may be new DMA requests and return immediately.  */
 143    if (s->running) {
 144        s->running++;
 145        return;
 146    }
 147    s->running = 1;
 148    while (s->running) {
 149        for (c = 0; c < s->nchannels; c++) {
 150            ch = &s->chan[c];
 151again:
 152            /* Test if thiws channel has any pending DMA requests.  */
 153            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
 154                    != PL080_CCONF_E)
 155                continue;
 156            flow = (ch->conf >> 11) & 7;
 157            if (flow >= 4) {
 158                hw_error(
 159                    "pl080_run: Peripheral flow control not implemented\n");
 160            }
 161            src_id = (ch->conf >> 1) & 0x1f;
 162            dest_id = (ch->conf >> 6) & 0x1f;
 163            size = ch->ctrl & 0xfff;
 164            req = s->req_single | s->req_burst;
 165            switch (flow) {
 166            case 0:
 167                break;
 168            case 1:
 169                if ((req & (1u << dest_id)) == 0)
 170                    size = 0;
 171                break;
 172            case 2:
 173                if ((req & (1u << src_id)) == 0)
 174                    size = 0;
 175                break;
 176            case 3:
 177                if ((req & (1u << src_id)) == 0
 178                        || (req & (1u << dest_id)) == 0)
 179                    size = 0;
 180                break;
 181            }
 182            if (!size)
 183                continue;
 184
 185            /* Transfer one element.  */
 186            /* ??? Should transfer multiple elements for a burst request.  */
 187            /* ??? Unclear what the proper behavior is when source and
 188               destination widths are different.  */
 189            swidth = 1 << ((ch->ctrl >> 18) & 7);
 190            dwidth = 1 << ((ch->ctrl >> 21) & 7);
 191            for (n = 0; n < dwidth; n+= swidth) {
 192                cpu_physical_memory_read(ch->src, buff + n, swidth);
 193                if (ch->ctrl & PL080_CCTRL_SI)
 194                    ch->src += swidth;
 195            }
 196            xsize = (dwidth < swidth) ? swidth : dwidth;
 197            /* ??? This may pad the value incorrectly for dwidth < 32.  */
 198            for (n = 0; n < xsize; n += dwidth) {
 199                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
 200                if (ch->ctrl & PL080_CCTRL_DI)
 201                    ch->dest += swidth;
 202            }
 203
 204            size--;
 205            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
 206            if (size == 0) {
 207                /* Transfer complete.  */
 208                if (ch->lli) {
 209                    ch->src = address_space_ldl_le(&address_space_memory,
 210                                                   ch->lli,
 211                                                   MEMTXATTRS_UNSPECIFIED,
 212                                                   NULL);
 213                    ch->dest = address_space_ldl_le(&address_space_memory,
 214                                                    ch->lli + 4,
 215                                                    MEMTXATTRS_UNSPECIFIED,
 216                                                    NULL);
 217                    ch->ctrl = address_space_ldl_le(&address_space_memory,
 218                                                    ch->lli + 12,
 219                                                    MEMTXATTRS_UNSPECIFIED,
 220                                                    NULL);
 221                    ch->lli = address_space_ldl_le(&address_space_memory,
 222                                                   ch->lli + 8,
 223                                                   MEMTXATTRS_UNSPECIFIED,
 224                                                   NULL);
 225                } else {
 226                    ch->conf &= ~PL080_CCONF_E;
 227                }
 228                if (ch->ctrl & PL080_CCTRL_I) {
 229                    s->tc_int |= 1 << c;
 230                }
 231            }
 232            goto again;
 233        }
 234        if (--s->running)
 235            s->running = 1;
 236    }
 237}
 238
 239static uint64_t pl080_read(void *opaque, hwaddr offset,
 240                           unsigned size)
 241{
 242    PL080State *s = (PL080State *)opaque;
 243    uint32_t i;
 244    uint32_t mask;
 245
 246    if (offset >= 0xfe0 && offset < 0x1000) {
 247        if (s->nchannels == 8) {
 248            return pl080_id[(offset - 0xfe0) >> 2];
 249        } else {
 250            return pl081_id[(offset - 0xfe0) >> 2];
 251        }
 252    }
 253    if (offset >= 0x100 && offset < 0x200) {
 254        i = (offset & 0xe0) >> 5;
 255        if (i >= s->nchannels)
 256            goto bad_offset;
 257        switch (offset >> 2) {
 258        case 0: /* SrcAddr */
 259            return s->chan[i].src;
 260        case 1: /* DestAddr */
 261            return s->chan[i].dest;
 262        case 2: /* LLI */
 263            return s->chan[i].lli;
 264        case 3: /* Control */
 265            return s->chan[i].ctrl;
 266        case 4: /* Configuration */
 267            return s->chan[i].conf;
 268        default:
 269            goto bad_offset;
 270        }
 271    }
 272    switch (offset >> 2) {
 273    case 0: /* IntStatus */
 274        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
 275    case 1: /* IntTCStatus */
 276        return (s->tc_int & s->tc_mask);
 277    case 3: /* IntErrorStatus */
 278        return (s->err_int & s->err_mask);
 279    case 5: /* RawIntTCStatus */
 280        return s->tc_int;
 281    case 6: /* RawIntErrorStatus */
 282        return s->err_int;
 283    case 7: /* EnbldChns */
 284        mask = 0;
 285        for (i = 0; i < s->nchannels; i++) {
 286            if (s->chan[i].conf & PL080_CCONF_E)
 287                mask |= 1 << i;
 288        }
 289        return mask;
 290    case 8: /* SoftBReq */
 291    case 9: /* SoftSReq */
 292    case 10: /* SoftLBReq */
 293    case 11: /* SoftLSReq */
 294        /* ??? Implement these. */
 295        return 0;
 296    case 12: /* Configuration */
 297        return s->conf;
 298    case 13: /* Sync */
 299        return s->sync;
 300    default:
 301    bad_offset:
 302        qemu_log_mask(LOG_GUEST_ERROR,
 303                      "pl080_read: Bad offset %x\n", (int)offset);
 304        return 0;
 305    }
 306}
 307
 308static void pl080_write(void *opaque, hwaddr offset,
 309                        uint64_t value, unsigned size)
 310{
 311    PL080State *s = (PL080State *)opaque;
 312    int i;
 313
 314    if (offset >= 0x100 && offset < 0x200) {
 315        i = (offset & 0xe0) >> 5;
 316        if (i >= s->nchannels)
 317            goto bad_offset;
 318        switch (offset >> 2) {
 319        case 0: /* SrcAddr */
 320            s->chan[i].src = value;
 321            break;
 322        case 1: /* DestAddr */
 323            s->chan[i].dest = value;
 324            break;
 325        case 2: /* LLI */
 326            s->chan[i].lli = value;
 327            break;
 328        case 3: /* Control */
 329            s->chan[i].ctrl = value;
 330            break;
 331        case 4: /* Configuration */
 332            s->chan[i].conf = value;
 333            pl080_run(s);
 334            break;
 335        }
 336    }
 337    switch (offset >> 2) {
 338    case 2: /* IntTCClear */
 339        s->tc_int &= ~value;
 340        break;
 341    case 4: /* IntErrorClear */
 342        s->err_int &= ~value;
 343        break;
 344    case 8: /* SoftBReq */
 345    case 9: /* SoftSReq */
 346    case 10: /* SoftLBReq */
 347    case 11: /* SoftLSReq */
 348        /* ??? Implement these.  */
 349        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
 350        break;
 351    case 12: /* Configuration */
 352        s->conf = value;
 353        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
 354            qemu_log_mask(LOG_UNIMP,
 355                          "pl080_write: Big-endian DMA not implemented\n");
 356        }
 357        pl080_run(s);
 358        break;
 359    case 13: /* Sync */
 360        s->sync = value;
 361        break;
 362    default:
 363    bad_offset:
 364        qemu_log_mask(LOG_GUEST_ERROR,
 365                      "pl080_write: Bad offset %x\n", (int)offset);
 366    }
 367    pl080_update(s);
 368}
 369
/* MMIO access callbacks for the 4K register region.  */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
 375
 376static void pl080_init(Object *obj)
 377{
 378    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
 379    PL080State *s = PL080(obj);
 380
 381    memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
 382    sysbus_init_mmio(sbd, &s->iomem);
 383    sysbus_init_irq(sbd, &s->irq);
 384    s->nchannels = 8;
 385}
 386
 387static void pl081_init(Object *obj)
 388{
 389    PL080State *s = PL080(obj);
 390
 391    s->nchannels = 2;
 392}
 393
 394static void pl080_class_init(ObjectClass *oc, void *data)
 395{
 396    DeviceClass *dc = DEVICE_CLASS(oc);
 397
 398    dc->vmsd = &vmstate_pl080;
 399}
 400
/* QOM type registration data.  The PL081 inherits everything from the
   PL080 and only overrides the channel count in its instance init.  */
static const TypeInfo pl080_info = {
    .name          = TYPE_PL080,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PL080State),
    .instance_init = pl080_init,
    .class_init    = pl080_class_init,
};

static const TypeInfo pl081_info = {
    .name          = "pl081",
    .parent        = TYPE_PL080,
    .instance_init = pl081_init,
};
 414
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

type_init(pl080_register_types)
 424