/* qemu/hw/dma/pl080.c */
   1/*
   2 * Arm PrimeCell PL080/PL081 DMA controller
   3 *
   4 * Copyright (c) 2006 CodeSourcery.
   5 * Written by Paul Brook
   6 *
   7 * This code is licensed under the GPL.
   8 */
   9
  10#include "qemu/osdep.h"
  11#include "hw/sysbus.h"
  12#include "exec/address-spaces.h"
  13#include "qemu/log.h"
  14
#define PL080_MAX_CHANNELS 8
/* DMACConfiguration register bits.  */
#define PL080_CONF_E    0x1  /* controller enable */
#define PL080_CONF_M1   0x2  /* big-endian mode, AHB master 1 (unimplemented, see pl080_write) */
#define PL080_CONF_M2   0x4  /* big-endian mode, AHB master 2 (unimplemented, see pl080_write) */

/* Per-channel configuration register bits.  */
#define PL080_CCONF_H   0x40000  /* halt: channel with H set is not serviced */
#define PL080_CCONF_A   0x20000  /* active flag -- not used by this model */
#define PL080_CCONF_L   0x10000  /* lock -- not used by this model */
#define PL080_CCONF_ITC 0x08000  /* terminal count interrupt enable */
#define PL080_CCONF_IE  0x04000  /* error interrupt enable */
#define PL080_CCONF_E   0x00001  /* channel enable */

/* Per-channel control register bits; the low 12 bits hold the transfer
   count, bits 18-20/21-23 encode source/destination transfer width.  */
#define PL080_CCTRL_I   0x80000000  /* raise terminal count interrupt on completion */
#define PL080_CCTRL_DI  0x08000000  /* increment destination address */
#define PL080_CCTRL_SI  0x04000000  /* increment source address */
#define PL080_CCTRL_D   0x02000000  /* presumably dest AHB master select (TRM) -- unused here */
#define PL080_CCTRL_S   0x01000000  /* presumably source AHB master select (TRM) -- unused here */
  32
/* Register state for one DMA channel.  */
typedef struct {
    uint32_t src;   /* source address register */
    uint32_t dest;  /* destination address register */
    uint32_t lli;   /* linked-list item pointer (next descriptor; 0 = end of chain) */
    uint32_t ctrl;  /* control register; low 12 bits are the remaining transfer count */
    uint32_t conf;  /* channel configuration register */
} pl080_channel;

#define TYPE_PL080 "pl080"
#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)
  43
/* Device state for the whole controller (PL080: 8 channels, PL081: 2).  */
typedef struct PL080State {
    SysBusDevice parent_obj;

    MemoryRegion iomem;    /* 4K register window */
    uint8_t tc_int;        /* raw terminal-count interrupt status, one bit per channel */
    uint8_t tc_mask;       /* terminal-count interrupt enable mask */
    uint8_t err_int;       /* raw error interrupt status, one bit per channel */
    uint8_t err_mask;      /* error interrupt enable mask */
    uint32_t conf;         /* DMACConfiguration register */
    uint32_t sync;         /* DMACSync register */
    uint32_t req_single;   /* pending single-transfer requests, one bit per request id */
    uint32_t req_burst;    /* pending burst-transfer requests, one bit per request id */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;         /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;          /* combined interrupt output line */
} PL080State;
  62
/* Migration state for one DMA channel: all five channel registers.  */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
  76
  77static const VMStateDescription vmstate_pl080 = {
  78    .name = "pl080",
  79    .version_id = 1,
  80    .minimum_version_id = 1,
  81    .fields = (VMStateField[]) {
  82        VMSTATE_UINT8(tc_int, PL080State),
  83        VMSTATE_UINT8(tc_mask, PL080State),
  84        VMSTATE_UINT8(err_int, PL080State),
  85        VMSTATE_UINT8(err_mask, PL080State),
  86        VMSTATE_UINT32(conf, PL080State),
  87        VMSTATE_UINT32(sync, PL080State),
  88        VMSTATE_UINT32(req_single, PL080State),
  89        VMSTATE_UINT32(req_burst, PL080State),
  90        VMSTATE_UINT8(tc_int, PL080State),
  91        VMSTATE_UINT8(tc_int, PL080State),
  92        VMSTATE_UINT8(tc_int, PL080State),
  93        VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
  94                             1, vmstate_pl080_channel, pl080_channel),
  95        VMSTATE_INT32(running, PL080State),
  96        VMSTATE_END_OF_LIST()
  97    }
  98};
  99
/* PrimeCell peripheral/cell ID register bytes, read back one per word at
   offsets 0xfe0..0xffc.  The two variants differ only in the part number
   byte (0x80 vs 0x81).  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
 105
 106static void pl080_update(PL080State *s)
 107{
 108    if ((s->tc_int & s->tc_mask)
 109            || (s->err_int & s->err_mask))
 110        qemu_irq_raise(s->irq);
 111    else
 112        qemu_irq_lower(s->irq);
 113}
 114
 115static void pl080_run(PL080State *s)
 116{
 117    int c;
 118    int flow;
 119    pl080_channel *ch;
 120    int swidth;
 121    int dwidth;
 122    int xsize;
 123    int n;
 124    int src_id;
 125    int dest_id;
 126    int size;
 127    uint8_t buff[4];
 128    uint32_t req;
 129
 130    s->tc_mask = 0;
 131    for (c = 0; c < s->nchannels; c++) {
 132        if (s->chan[c].conf & PL080_CCONF_ITC)
 133            s->tc_mask |= 1 << c;
 134        if (s->chan[c].conf & PL080_CCONF_IE)
 135            s->err_mask |= 1 << c;
 136    }
 137
 138    if ((s->conf & PL080_CONF_E) == 0)
 139        return;
 140
 141hw_error("DMA active\n");
 142    /* If we are already in the middle of a DMA operation then indicate that
 143       there may be new DMA requests and return immediately.  */
 144    if (s->running) {
 145        s->running++;
 146        return;
 147    }
 148    s->running = 1;
 149    while (s->running) {
 150        for (c = 0; c < s->nchannels; c++) {
 151            ch = &s->chan[c];
 152again:
 153            /* Test if thiws channel has any pending DMA requests.  */
 154            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
 155                    != PL080_CCONF_E)
 156                continue;
 157            flow = (ch->conf >> 11) & 7;
 158            if (flow >= 4) {
 159                hw_error(
 160                    "pl080_run: Peripheral flow control not implemented\n");
 161            }
 162            src_id = (ch->conf >> 1) & 0x1f;
 163            dest_id = (ch->conf >> 6) & 0x1f;
 164            size = ch->ctrl & 0xfff;
 165            req = s->req_single | s->req_burst;
 166            switch (flow) {
 167            case 0:
 168                break;
 169            case 1:
 170                if ((req & (1u << dest_id)) == 0)
 171                    size = 0;
 172                break;
 173            case 2:
 174                if ((req & (1u << src_id)) == 0)
 175                    size = 0;
 176                break;
 177            case 3:
 178                if ((req & (1u << src_id)) == 0
 179                        || (req & (1u << dest_id)) == 0)
 180                    size = 0;
 181                break;
 182            }
 183            if (!size)
 184                continue;
 185
 186            /* Transfer one element.  */
 187            /* ??? Should transfer multiple elements for a burst request.  */
 188            /* ??? Unclear what the proper behavior is when source and
 189               destination widths are different.  */
 190            swidth = 1 << ((ch->ctrl >> 18) & 7);
 191            dwidth = 1 << ((ch->ctrl >> 21) & 7);
 192            for (n = 0; n < dwidth; n+= swidth) {
 193                cpu_physical_memory_read(ch->src, buff + n, swidth);
 194                if (ch->ctrl & PL080_CCTRL_SI)
 195                    ch->src += swidth;
 196            }
 197            xsize = (dwidth < swidth) ? swidth : dwidth;
 198            /* ??? This may pad the value incorrectly for dwidth < 32.  */
 199            for (n = 0; n < xsize; n += dwidth) {
 200                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
 201                if (ch->ctrl & PL080_CCTRL_DI)
 202                    ch->dest += swidth;
 203            }
 204
 205            size--;
 206            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
 207            if (size == 0) {
 208                /* Transfer complete.  */
 209                if (ch->lli) {
 210                    ch->src = address_space_ldl_le(&address_space_memory,
 211                                                   ch->lli,
 212                                                   MEMTXATTRS_UNSPECIFIED,
 213                                                   NULL);
 214                    ch->dest = address_space_ldl_le(&address_space_memory,
 215                                                    ch->lli + 4,
 216                                                    MEMTXATTRS_UNSPECIFIED,
 217                                                    NULL);
 218                    ch->ctrl = address_space_ldl_le(&address_space_memory,
 219                                                    ch->lli + 12,
 220                                                    MEMTXATTRS_UNSPECIFIED,
 221                                                    NULL);
 222                    ch->lli = address_space_ldl_le(&address_space_memory,
 223                                                   ch->lli + 8,
 224                                                   MEMTXATTRS_UNSPECIFIED,
 225                                                   NULL);
 226                } else {
 227                    ch->conf &= ~PL080_CCONF_E;
 228                }
 229                if (ch->ctrl & PL080_CCTRL_I) {
 230                    s->tc_int |= 1 << c;
 231                }
 232            }
 233            goto again;
 234        }
 235        if (--s->running)
 236            s->running = 1;
 237    }
 238}
 239
 240static uint64_t pl080_read(void *opaque, hwaddr offset,
 241                           unsigned size)
 242{
 243    PL080State *s = (PL080State *)opaque;
 244    uint32_t i;
 245    uint32_t mask;
 246
 247    if (offset >= 0xfe0 && offset < 0x1000) {
 248        if (s->nchannels == 8) {
 249            return pl080_id[(offset - 0xfe0) >> 2];
 250        } else {
 251            return pl081_id[(offset - 0xfe0) >> 2];
 252        }
 253    }
 254    if (offset >= 0x100 && offset < 0x200) {
 255        i = (offset & 0xe0) >> 5;
 256        if (i >= s->nchannels)
 257            goto bad_offset;
 258        switch (offset >> 2) {
 259        case 0: /* SrcAddr */
 260            return s->chan[i].src;
 261        case 1: /* DestAddr */
 262            return s->chan[i].dest;
 263        case 2: /* LLI */
 264            return s->chan[i].lli;
 265        case 3: /* Control */
 266            return s->chan[i].ctrl;
 267        case 4: /* Configuration */
 268            return s->chan[i].conf;
 269        default:
 270            goto bad_offset;
 271        }
 272    }
 273    switch (offset >> 2) {
 274    case 0: /* IntStatus */
 275        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
 276    case 1: /* IntTCStatus */
 277        return (s->tc_int & s->tc_mask);
 278    case 3: /* IntErrorStatus */
 279        return (s->err_int & s->err_mask);
 280    case 5: /* RawIntTCStatus */
 281        return s->tc_int;
 282    case 6: /* RawIntErrorStatus */
 283        return s->err_int;
 284    case 7: /* EnbldChns */
 285        mask = 0;
 286        for (i = 0; i < s->nchannels; i++) {
 287            if (s->chan[i].conf & PL080_CCONF_E)
 288                mask |= 1 << i;
 289        }
 290        return mask;
 291    case 8: /* SoftBReq */
 292    case 9: /* SoftSReq */
 293    case 10: /* SoftLBReq */
 294    case 11: /* SoftLSReq */
 295        /* ??? Implement these. */
 296        return 0;
 297    case 12: /* Configuration */
 298        return s->conf;
 299    case 13: /* Sync */
 300        return s->sync;
 301    default:
 302    bad_offset:
 303        qemu_log_mask(LOG_GUEST_ERROR,
 304                      "pl080_read: Bad offset %x\n", (int)offset);
 305        return 0;
 306    }
 307}
 308
 309static void pl080_write(void *opaque, hwaddr offset,
 310                        uint64_t value, unsigned size)
 311{
 312    PL080State *s = (PL080State *)opaque;
 313    int i;
 314
 315    if (offset >= 0x100 && offset < 0x200) {
 316        i = (offset & 0xe0) >> 5;
 317        if (i >= s->nchannels)
 318            goto bad_offset;
 319        switch (offset >> 2) {
 320        case 0: /* SrcAddr */
 321            s->chan[i].src = value;
 322            break;
 323        case 1: /* DestAddr */
 324            s->chan[i].dest = value;
 325            break;
 326        case 2: /* LLI */
 327            s->chan[i].lli = value;
 328            break;
 329        case 3: /* Control */
 330            s->chan[i].ctrl = value;
 331            break;
 332        case 4: /* Configuration */
 333            s->chan[i].conf = value;
 334            pl080_run(s);
 335            break;
 336        }
 337    }
 338    switch (offset >> 2) {
 339    case 2: /* IntTCClear */
 340        s->tc_int &= ~value;
 341        break;
 342    case 4: /* IntErrorClear */
 343        s->err_int &= ~value;
 344        break;
 345    case 8: /* SoftBReq */
 346    case 9: /* SoftSReq */
 347    case 10: /* SoftLBReq */
 348    case 11: /* SoftLSReq */
 349        /* ??? Implement these.  */
 350        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
 351        break;
 352    case 12: /* Configuration */
 353        s->conf = value;
 354        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
 355            qemu_log_mask(LOG_UNIMP,
 356                          "pl080_write: Big-endian DMA not implemented\n");
 357        }
 358        pl080_run(s);
 359        break;
 360    case 13: /* Sync */
 361        s->sync = value;
 362        break;
 363    default:
 364    bad_offset:
 365        qemu_log_mask(LOG_GUEST_ERROR,
 366                      "pl080_write: Bad offset %x\n", (int)offset);
 367    }
 368    pl080_update(s);
 369}
 370
/* MMIO access handlers for the 4K register window.  */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
 376
 377static void pl080_init(Object *obj)
 378{
 379    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
 380    PL080State *s = PL080(obj);
 381
 382    memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
 383    sysbus_init_mmio(sbd, &s->iomem);
 384    sysbus_init_irq(sbd, &s->irq);
 385    s->nchannels = 8;
 386}
 387
 388static void pl081_init(Object *obj)
 389{
 390    PL080State *s = PL080(obj);
 391
 392    s->nchannels = 2;
 393}
 394
 395static void pl080_class_init(ObjectClass *oc, void *data)
 396{
 397    DeviceClass *dc = DEVICE_CLASS(oc);
 398
 399    dc->vmsd = &vmstate_pl080;
 400}
 401
/* QOM type registration for the 8-channel PL080.  */
static const TypeInfo pl080_info = {
    .name          = TYPE_PL080,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PL080State),
    .instance_init = pl080_init,
    .class_init    = pl080_class_init,
};
 409
/* The PL081 is a QOM subtype of the PL080; it inherits everything and
   only overrides the channel count in its instance_init.  */
static const TypeInfo pl081_info = {
    .name          = "pl081",
    .parent        = TYPE_PL080,
    .instance_init = pl081_init,
};
 415
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

/* Hook type registration into QEMU's module init machinery.  */
type_init(pl080_register_types)
 425