1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include "qemu/osdep.h"
24#include "qemu/bitops.h"
25#include "qemu/log.h"
26#include "qapi/error.h"
27#include "hw/irq.h"
28#include "hw/qdev-properties.h"
29#include "hw/sysbus.h"
30#include "migration/vmstate.h"
31#include "sysemu/dma.h"
32#include "hw/dma/sifive_pdma.h"
33
34#define DMA_CONTROL 0x000
35#define CONTROL_CLAIM BIT(0)
36#define CONTROL_RUN BIT(1)
37#define CONTROL_DONE_IE BIT(14)
38#define CONTROL_ERR_IE BIT(15)
39#define CONTROL_DONE BIT(30)
40#define CONTROL_ERR BIT(31)
41
42#define DMA_NEXT_CONFIG 0x004
43#define CONFIG_REPEAT BIT(2)
44#define CONFIG_ORDER BIT(3)
45#define CONFIG_WRSZ_SHIFT 24
46#define CONFIG_RDSZ_SHIFT 28
47#define CONFIG_SZ_MASK 0xf
48
49#define DMA_NEXT_BYTES 0x008
50#define DMA_NEXT_DST 0x010
51#define DMA_NEXT_SRC 0x018
52#define DMA_EXEC_CONFIG 0x104
53#define DMA_EXEC_BYTES 0x108
54#define DMA_EXEC_DST 0x110
55#define DMA_EXEC_SRC 0x118
56
57
58
59
60
61#define CONFIG_WRSZ_DEFAULT 6
62#define CONFIG_RDSZ_DEFAULT 6
63
/*
 * Lifecycle states of a single PDMA channel, mirrored into the
 * guest-visible DONE/ERR control bits by sifive_pdma_run().
 */
enum dma_chan_state {
    DMA_CHAN_STATE_IDLE = 0,    /* no transfer pending (set after IRQ update) */
    DMA_CHAN_STATE_STARTED = 1, /* transfer in progress */
    DMA_CHAN_STATE_ERROR = 2,   /* transfer aborted (e.g. wsize != rsize) */
    DMA_CHAN_STATE_DONE = 3     /* transfer completed successfully */
};
70
/*
 * Execute one DMA transfer for channel @ch, copying next_bytes bytes
 * from next_src to next_dst in guest physical memory.  The transfer is
 * performed synchronously and runs to completion; on return the channel
 * is in either the DONE or ERROR state with the matching control bit
 * set.  The caller is expected to follow up with
 * sifive_pdma_update_irq() to signal the guest.
 */
static void sifive_pdma_run(SiFivePDMAState *s, int ch)
{
    uint64_t bytes = s->chan[ch].next_bytes;
    uint64_t dst = s->chan[ch].next_dst;
    uint64_t src = s->chan[ch].next_src;
    uint32_t config = s->chan[ch].next_config;
    int wsize, rsize, size, remainder;
    uint8_t buf[64];
    int n;

    /* a zero-byte transfer completes immediately: DONE, not ERROR */
    if (!bytes) {
        goto done;
    }

    /*
     * Extract the log2 write/read transaction sizes from the config
     * register.  NOTE(review): behaviour for wsize != rsize is not
     * modelled here; such a transfer is failed with the ERROR bit.
     */
    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
    if (wsize != rsize) {
        goto error;
    }

    /*
     * Copy unit is 2^wsize bytes, clamped to 64 bytes — the capacity of
     * the bounce buffer above.  Any tail shorter than one full unit is
     * transferred separately after the main loop.
     */
    size = wsize;
    if (size > 6) {
        size = 6;
    }
    size = 1 << size;
    remainder = bytes % size;

    /* mark the transfer as in progress and clear stale status bits */
    s->chan[ch].state = DMA_CHAN_STATE_STARTED;
    s->chan[ch].control &= ~CONTROL_DONE;
    s->chan[ch].control &= ~CONTROL_ERR;

    /* latch the Next* registers into their Exec* counterparts */
    s->chan[ch].exec_config = config;
    s->chan[ch].exec_bytes = bytes;
    s->chan[ch].exec_dst = dst;
    s->chan[ch].exec_src = src;

    /* copy whole size-byte units through the bounce buffer */
    for (n = 0; n < bytes / size; n++) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
        s->chan[ch].exec_src += size;
        s->chan[ch].exec_dst += size;
        s->chan[ch].exec_bytes -= size;
    }

    /* copy the sub-unit tail, if any */
    if (remainder) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder);
        s->chan[ch].exec_src += remainder;
        s->chan[ch].exec_dst += remainder;
        s->chan[ch].exec_bytes -= remainder;
    }

    /* in repeat mode, reload Exec* so the transfer can be re-run */
    if (s->chan[ch].next_config & CONFIG_REPEAT) {
        s->chan[ch].exec_bytes = bytes;
        s->chan[ch].exec_dst = dst;
        s->chan[ch].exec_src = src;
    }

done:
    /* signal completion: clear RUN, set DONE */
    s->chan[ch].state = DMA_CHAN_STATE_DONE;
    s->chan[ch].control &= ~CONTROL_RUN;
    s->chan[ch].control |= CONTROL_DONE;
    return;

error:
    /* abort the transfer: leave Exec* untouched, set ERR */
    s->chan[ch].state = DMA_CHAN_STATE_ERROR;
    s->chan[ch].control |= CONTROL_ERR;
    return;
}
157
158static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
159{
160 bool done_ie, err_ie;
161
162 done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
163 err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);
164
165 if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
166 qemu_irq_raise(s->irq[ch * 2]);
167 } else {
168 qemu_irq_lower(s->irq[ch * 2]);
169 }
170
171 if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
172 qemu_irq_raise(s->irq[ch * 2 + 1]);
173 } else {
174 qemu_irq_lower(s->irq[ch * 2 + 1]);
175 }
176
177 s->chan[ch].state = DMA_CHAN_STATE_IDLE;
178}
179
180static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
181{
182 SiFivePDMAState *s = opaque;
183 int ch = SIFIVE_PDMA_CHAN_NO(offset);
184 uint64_t val = 0;
185
186 if (ch >= SIFIVE_PDMA_CHANS) {
187 qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
188 __func__, ch);
189 return 0;
190 }
191
192 offset &= 0xfff;
193 switch (offset) {
194 case DMA_CONTROL:
195 val = s->chan[ch].control;
196 break;
197 case DMA_NEXT_CONFIG:
198 val = s->chan[ch].next_config;
199 break;
200 case DMA_NEXT_BYTES:
201 val = s->chan[ch].next_bytes;
202 break;
203 case DMA_NEXT_DST:
204 val = s->chan[ch].next_dst;
205 break;
206 case DMA_NEXT_SRC:
207 val = s->chan[ch].next_src;
208 break;
209 case DMA_EXEC_CONFIG:
210 val = s->chan[ch].exec_config;
211 break;
212 case DMA_EXEC_BYTES:
213 val = s->chan[ch].exec_bytes;
214 break;
215 case DMA_EXEC_DST:
216 val = s->chan[ch].exec_dst;
217 break;
218 case DMA_EXEC_SRC:
219 val = s->chan[ch].exec_src;
220 break;
221 default:
222 qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
223 __func__, offset);
224 break;
225 }
226
227 return val;
228}
229
230static void sifive_pdma_write(void *opaque, hwaddr offset,
231 uint64_t value, unsigned size)
232{
233 SiFivePDMAState *s = opaque;
234 int ch = SIFIVE_PDMA_CHAN_NO(offset);
235 bool claimed, run;
236
237 if (ch >= SIFIVE_PDMA_CHANS) {
238 qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
239 __func__, ch);
240 return;
241 }
242
243 offset &= 0xfff;
244 switch (offset) {
245 case DMA_CONTROL:
246 claimed = !!(s->chan[ch].control & CONTROL_CLAIM);
247 run = !!(s->chan[ch].control & CONTROL_RUN);
248
249 if (!claimed && (value & CONTROL_CLAIM)) {
250
251 s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) |
252 (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT);
253 s->chan[ch].next_bytes = 0;
254 s->chan[ch].next_dst = 0;
255 s->chan[ch].next_src = 0;
256 }
257
258
259 if (run && !(value & CONTROL_CLAIM)) {
260 value |= CONTROL_CLAIM;
261 }
262
263 s->chan[ch].control = value;
264
265
266
267
268
269
270 if (!claimed || (!run && !(value & CONTROL_CLAIM))) {
271 s->chan[ch].control &= ~CONTROL_RUN;
272 return;
273 }
274
275 if (value & CONTROL_RUN) {
276 sifive_pdma_run(s, ch);
277 }
278
279 sifive_pdma_update_irq(s, ch);
280 break;
281 case DMA_NEXT_CONFIG:
282 s->chan[ch].next_config = value;
283 break;
284 case DMA_NEXT_BYTES:
285 s->chan[ch].next_bytes = value;
286 break;
287 case DMA_NEXT_DST:
288 s->chan[ch].next_dst = value;
289 break;
290 case DMA_NEXT_SRC:
291 s->chan[ch].next_src = value;
292 break;
293 case DMA_EXEC_CONFIG:
294 case DMA_EXEC_BYTES:
295 case DMA_EXEC_DST:
296 case DMA_EXEC_SRC:
297
298 break;
299 default:
300 qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
301 __func__, offset);
302 break;
303 }
304}
305
/*
 * MMIO access callbacks for the PDMA register bank.  .impl limits the
 * accesses this model sees to 4 or 8 bytes; narrower guest accesses
 * are synthesised by the memory core.
 */
static const MemoryRegionOps sifive_pdma_ops = {
    .read = sifive_pdma_read,
    .write = sifive_pdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* there are both 32-bit and 64-bit wide registers */
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    }
};
316
317static void sifive_pdma_realize(DeviceState *dev, Error **errp)
318{
319 SiFivePDMAState *s = SIFIVE_PDMA(dev);
320 int i;
321
322 memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
323 TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
324 sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
325
326 for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
327 sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
328 }
329}
330
331static void sifive_pdma_class_init(ObjectClass *klass, void *data)
332{
333 DeviceClass *dc = DEVICE_CLASS(klass);
334
335 dc->desc = "SiFive Platform DMA controller";
336 dc->realize = sifive_pdma_realize;
337}
338
/* QOM type description: a sysbus device backed by SiFivePDMAState. */
static const TypeInfo sifive_pdma_info = {
    .name = TYPE_SIFIVE_PDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SiFivePDMAState),
    .class_init = sifive_pdma_class_init,
};
345
/* Register the PDMA type with QOM at module-load time. */
static void sifive_pdma_register_types(void)
{
    type_register_static(&sifive_pdma_info);
}

type_init(sifive_pdma_register_types)
352