   1/*
   2 * Arm PrimeCell PL080/PL081 DMA controller
   3 *
   4 * Copyright (c) 2006 CodeSourcery.
   5 * Written by Paul Brook
   6 *
   7 * This code is licensed under the GPL.
   8 */
   9
  10#include "hw/sysbus.h"
  11#include "exec/address-spaces.h"
  12
#define PL080_MAX_CHANNELS 8

/* Global configuration register (DMACConfiguration) bits.  */
#define PL080_CONF_E    0x1 /* Controller enable */
#define PL080_CONF_M1   0x2 /* AHB master 1 big-endian (unimplemented, warns) */
#define PL080_CONF_M2   0x4 /* AHB master 2 big-endian (unimplemented, warns) */

/* Per-channel configuration register bits.  */
#define PL080_CCONF_H   0x40000 /* Halt: channel ignored while set */
#define PL080_CCONF_A   0x20000 /* Active (not used by this model) */
#define PL080_CCONF_L   0x10000 /* Lock (not used by this model) */
#define PL080_CCONF_ITC 0x08000 /* Terminal count interrupt unmasked */
#define PL080_CCONF_IE  0x04000 /* Error interrupt unmasked */
#define PL080_CCONF_E   0x00001 /* Channel enable */

/* Per-channel control register bits.  */
#define PL080_CCTRL_I   0x80000000 /* Raise terminal count interrupt on completion */
#define PL080_CCTRL_DI  0x08000000 /* Increment destination address after each element */
#define PL080_CCTRL_SI  0x04000000 /* Increment source address after each element */
#define PL080_CCTRL_D   0x02000000 /* NOTE(review): per TRM, dest AHB master select — unused here */
#define PL080_CCTRL_S   0x01000000 /* NOTE(review): per TRM, src AHB master select — unused here */
  30
/* Register state for one DMA channel.  */
typedef struct {
    uint32_t src;   /* Source address */
    uint32_t dest;  /* Destination address */
    uint32_t lli;   /* Linked list item pointer (next descriptor; 0 = none) */
    uint32_t ctrl;  /* Control: transfer size, widths, increment flags */
    uint32_t conf;  /* Configuration: enable, halt, flow control, request ids */
} pl080_channel;

#define TYPE_PL080 "pl080"
#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)
  41
/* Device state for the whole controller.  */
typedef struct PL080State {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    uint8_t tc_int;      /* Raw terminal count interrupt status, one bit per channel */
    uint8_t tc_mask;     /* Terminal count interrupt enable mask */
    uint8_t err_int;     /* Raw error interrupt status */
    uint8_t err_mask;    /* Error interrupt enable mask */
    uint32_t conf;       /* Global configuration register */
    uint32_t sync;       /* Sync register (stored, otherwise unused here) */
    uint32_t req_single; /* Pending single-transfer peripheral request lines */
    uint32_t req_burst;  /* Pending burst-transfer peripheral request lines */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;       /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;        /* Combined TC/error interrupt line */
} PL080State;
  60
/* Migration state for one DMA channel.  Field order defines the wire
   format and must not change within version 1.  */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
  74
  75static const VMStateDescription vmstate_pl080 = {
  76    .name = "pl080",
  77    .version_id = 1,
  78    .minimum_version_id = 1,
  79    .fields = (VMStateField[]) {
  80        VMSTATE_UINT8(tc_int, PL080State),
  81        VMSTATE_UINT8(tc_mask, PL080State),
  82        VMSTATE_UINT8(err_int, PL080State),
  83        VMSTATE_UINT8(err_mask, PL080State),
  84        VMSTATE_UINT32(conf, PL080State),
  85        VMSTATE_UINT32(sync, PL080State),
  86        VMSTATE_UINT32(req_single, PL080State),
  87        VMSTATE_UINT32(req_burst, PL080State),
  88        VMSTATE_UINT8(tc_int, PL080State),
  89        VMSTATE_UINT8(tc_int, PL080State),
  90        VMSTATE_UINT8(tc_int, PL080State),
  91        VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
  92                             1, vmstate_pl080_channel, pl080_channel),
  93        VMSTATE_INT32(running, PL080State),
  94        VMSTATE_END_OF_LIST()
  95    }
  96};
  97
/* PrimeCell identification register bytes (offsets 0xfe0..0xfff, one byte
   per word).  Only the first byte (part number low byte) differs between
   the PL080 and the PL081.  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
 103
 104static void pl080_update(PL080State *s)
 105{
 106    if ((s->tc_int & s->tc_mask)
 107            || (s->err_int & s->err_mask))
 108        qemu_irq_raise(s->irq);
 109    else
 110        qemu_irq_lower(s->irq);
 111}
 112
 113static void pl080_run(PL080State *s)
 114{
 115    int c;
 116    int flow;
 117    pl080_channel *ch;
 118    int swidth;
 119    int dwidth;
 120    int xsize;
 121    int n;
 122    int src_id;
 123    int dest_id;
 124    int size;
 125    uint8_t buff[4];
 126    uint32_t req;
 127
 128    s->tc_mask = 0;
 129    for (c = 0; c < s->nchannels; c++) {
 130        if (s->chan[c].conf & PL080_CCONF_ITC)
 131            s->tc_mask |= 1 << c;
 132        if (s->chan[c].conf & PL080_CCONF_IE)
 133            s->err_mask |= 1 << c;
 134    }
 135
 136    if ((s->conf & PL080_CONF_E) == 0)
 137        return;
 138
 139hw_error("DMA active\n");
 140    /* If we are already in the middle of a DMA operation then indicate that
 141       there may be new DMA requests and return immediately.  */
 142    if (s->running) {
 143        s->running++;
 144        return;
 145    }
 146    s->running = 1;
 147    while (s->running) {
 148        for (c = 0; c < s->nchannels; c++) {
 149            ch = &s->chan[c];
 150again:
 151            /* Test if thiws channel has any pending DMA requests.  */
 152            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
 153                    != PL080_CCONF_E)
 154                continue;
 155            flow = (ch->conf >> 11) & 7;
 156            if (flow >= 4) {
 157                hw_error(
 158                    "pl080_run: Peripheral flow control not implemented\n");
 159            }
 160            src_id = (ch->conf >> 1) & 0x1f;
 161            dest_id = (ch->conf >> 6) & 0x1f;
 162            size = ch->ctrl & 0xfff;
 163            req = s->req_single | s->req_burst;
 164            switch (flow) {
 165            case 0:
 166                break;
 167            case 1:
 168                if ((req & (1u << dest_id)) == 0)
 169                    size = 0;
 170                break;
 171            case 2:
 172                if ((req & (1u << src_id)) == 0)
 173                    size = 0;
 174                break;
 175            case 3:
 176                if ((req & (1u << src_id)) == 0
 177                        || (req & (1u << dest_id)) == 0)
 178                    size = 0;
 179                break;
 180            }
 181            if (!size)
 182                continue;
 183
 184            /* Transfer one element.  */
 185            /* ??? Should transfer multiple elements for a burst request.  */
 186            /* ??? Unclear what the proper behavior is when source and
 187               destination widths are different.  */
 188            swidth = 1 << ((ch->ctrl >> 18) & 7);
 189            dwidth = 1 << ((ch->ctrl >> 21) & 7);
 190            for (n = 0; n < dwidth; n+= swidth) {
 191                cpu_physical_memory_read(ch->src, buff + n, swidth);
 192                if (ch->ctrl & PL080_CCTRL_SI)
 193                    ch->src += swidth;
 194            }
 195            xsize = (dwidth < swidth) ? swidth : dwidth;
 196            /* ??? This may pad the value incorrectly for dwidth < 32.  */
 197            for (n = 0; n < xsize; n += dwidth) {
 198                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
 199                if (ch->ctrl & PL080_CCTRL_DI)
 200                    ch->dest += swidth;
 201            }
 202
 203            size--;
 204            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
 205            if (size == 0) {
 206                /* Transfer complete.  */
 207                if (ch->lli) {
 208                    ch->src = address_space_ldl_le(&address_space_memory,
 209                                                   ch->lli,
 210                                                   MEMTXATTRS_UNSPECIFIED,
 211                                                   NULL);
 212                    ch->dest = address_space_ldl_le(&address_space_memory,
 213                                                    ch->lli + 4,
 214                                                    MEMTXATTRS_UNSPECIFIED,
 215                                                    NULL);
 216                    ch->ctrl = address_space_ldl_le(&address_space_memory,
 217                                                    ch->lli + 12,
 218                                                    MEMTXATTRS_UNSPECIFIED,
 219                                                    NULL);
 220                    ch->lli = address_space_ldl_le(&address_space_memory,
 221                                                   ch->lli + 8,
 222                                                   MEMTXATTRS_UNSPECIFIED,
 223                                                   NULL);
 224                } else {
 225                    ch->conf &= ~PL080_CCONF_E;
 226                }
 227                if (ch->ctrl & PL080_CCTRL_I) {
 228                    s->tc_int |= 1 << c;
 229                }
 230            }
 231            goto again;
 232        }
 233        if (--s->running)
 234            s->running = 1;
 235    }
 236}
 237
 238static uint64_t pl080_read(void *opaque, hwaddr offset,
 239                           unsigned size)
 240{
 241    PL080State *s = (PL080State *)opaque;
 242    uint32_t i;
 243    uint32_t mask;
 244
 245    if (offset >= 0xfe0 && offset < 0x1000) {
 246        if (s->nchannels == 8) {
 247            return pl080_id[(offset - 0xfe0) >> 2];
 248        } else {
 249            return pl081_id[(offset - 0xfe0) >> 2];
 250        }
 251    }
 252    if (offset >= 0x100 && offset < 0x200) {
 253        i = (offset & 0xe0) >> 5;
 254        if (i >= s->nchannels)
 255            goto bad_offset;
 256        switch (offset >> 2) {
 257        case 0: /* SrcAddr */
 258            return s->chan[i].src;
 259        case 1: /* DestAddr */
 260            return s->chan[i].dest;
 261        case 2: /* LLI */
 262            return s->chan[i].lli;
 263        case 3: /* Control */
 264            return s->chan[i].ctrl;
 265        case 4: /* Configuration */
 266            return s->chan[i].conf;
 267        default:
 268            goto bad_offset;
 269        }
 270    }
 271    switch (offset >> 2) {
 272    case 0: /* IntStatus */
 273        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
 274    case 1: /* IntTCStatus */
 275        return (s->tc_int & s->tc_mask);
 276    case 3: /* IntErrorStatus */
 277        return (s->err_int & s->err_mask);
 278    case 5: /* RawIntTCStatus */
 279        return s->tc_int;
 280    case 6: /* RawIntErrorStatus */
 281        return s->err_int;
 282    case 7: /* EnbldChns */
 283        mask = 0;
 284        for (i = 0; i < s->nchannels; i++) {
 285            if (s->chan[i].conf & PL080_CCONF_E)
 286                mask |= 1 << i;
 287        }
 288        return mask;
 289    case 8: /* SoftBReq */
 290    case 9: /* SoftSReq */
 291    case 10: /* SoftLBReq */
 292    case 11: /* SoftLSReq */
 293        /* ??? Implement these. */
 294        return 0;
 295    case 12: /* Configuration */
 296        return s->conf;
 297    case 13: /* Sync */
 298        return s->sync;
 299    default:
 300    bad_offset:
 301        qemu_log_mask(LOG_GUEST_ERROR,
 302                      "pl080_read: Bad offset %x\n", (int)offset);
 303        return 0;
 304    }
 305}
 306
 307static void pl080_write(void *opaque, hwaddr offset,
 308                        uint64_t value, unsigned size)
 309{
 310    PL080State *s = (PL080State *)opaque;
 311    int i;
 312
 313    if (offset >= 0x100 && offset < 0x200) {
 314        i = (offset & 0xe0) >> 5;
 315        if (i >= s->nchannels)
 316            goto bad_offset;
 317        switch (offset >> 2) {
 318        case 0: /* SrcAddr */
 319            s->chan[i].src = value;
 320            break;
 321        case 1: /* DestAddr */
 322            s->chan[i].dest = value;
 323            break;
 324        case 2: /* LLI */
 325            s->chan[i].lli = value;
 326            break;
 327        case 3: /* Control */
 328            s->chan[i].ctrl = value;
 329            break;
 330        case 4: /* Configuration */
 331            s->chan[i].conf = value;
 332            pl080_run(s);
 333            break;
 334        }
 335    }
 336    switch (offset >> 2) {
 337    case 2: /* IntTCClear */
 338        s->tc_int &= ~value;
 339        break;
 340    case 4: /* IntErrorClear */
 341        s->err_int &= ~value;
 342        break;
 343    case 8: /* SoftBReq */
 344    case 9: /* SoftSReq */
 345    case 10: /* SoftLBReq */
 346    case 11: /* SoftLSReq */
 347        /* ??? Implement these.  */
 348        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
 349        break;
 350    case 12: /* Configuration */
 351        s->conf = value;
 352        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
 353            qemu_log_mask(LOG_UNIMP,
 354                          "pl080_write: Big-endian DMA not implemented\n");
 355        }
 356        pl080_run(s);
 357        break;
 358    case 13: /* Sync */
 359        s->sync = value;
 360        break;
 361    default:
 362    bad_offset:
 363        qemu_log_mask(LOG_GUEST_ERROR,
 364                      "pl080_write: Bad offset %x\n", (int)offset);
 365    }
 366    pl080_update(s);
 367}
 368
/* MMIO access handlers for the 4K register region.  */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
 374
 375static void pl080_init(Object *obj)
 376{
 377    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
 378    PL080State *s = PL080(obj);
 379
 380    memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
 381    sysbus_init_mmio(sbd, &s->iomem);
 382    sysbus_init_irq(sbd, &s->irq);
 383    s->nchannels = 8;
 384}
 385
 386static void pl081_init(Object *obj)
 387{
 388    PL080State *s = PL080(obj);
 389
 390    s->nchannels = 2;
 391}
 392
 393static void pl080_class_init(ObjectClass *oc, void *data)
 394{
 395    DeviceClass *dc = DEVICE_CLASS(oc);
 396
 397    dc->vmsd = &vmstate_pl080;
 398}
 399
/* QOM type registration for the 8-channel PL080.  */
static const TypeInfo pl080_info = {
    .name          = TYPE_PL080,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PL080State),
    .instance_init = pl080_init,
    .class_init    = pl080_class_init,
};
 407
/* The PL081 reuses the PL080 implementation wholesale, overriding only
   the channel count in its instance_init.  */
static const TypeInfo pl081_info = {
    .name          = "pl081",
    .parent        = TYPE_PL080,
    .instance_init = pl081_init,
};
 413
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

/* Run the registration at QEMU type-initialization time.  */
type_init(pl080_register_types)
 423