/*
 * QEMU model of the Xilinx Zynq Devcfg (Device Configuration) Interface
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zynq-devcfg.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"

#define FREQ_HZ 900000000

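/* Size of the bounce buffer: the most bytes moved per DMA burst. */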
#define BTT_MAX 0x400

#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG
#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0
#endif

#define DB_PRINT(fmt, args...) do { \
    if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \
        qemu_log("%s: " fmt, __func__, ## args); \
    } \
} while (0)

REG32(CTRL, 0x00)
    FIELD(CTRL, FORCE_RST, 31, 1)
    FIELD(CTRL, PCAP_PR, 27, 1)
    FIELD(CTRL, PCAP_MODE, 26, 1)
    FIELD(CTRL, MULTIBOOT_EN, 24, 1)
    FIELD(CTRL, USER_MODE, 15, 1)
    FIELD(CTRL, PCFG_AES_FUSE, 12, 1)
    FIELD(CTRL, PCFG_AES_EN, 9, 3)
    FIELD(CTRL, SEU_EN, 8, 1)
    FIELD(CTRL, SEC_EN, 7, 1)
    FIELD(CTRL, SPNIDEN, 6, 1)
    FIELD(CTRL, SPIDEN, 5, 1)
    FIELD(CTRL, NIDEN, 4, 1)
    FIELD(CTRL, DBGEN, 3, 1)
    FIELD(CTRL, DAP_EN, 0, 3)

REG32(LOCK, 0x04)
#define AES_FUSE_LOCK 4
#define AES_EN_LOCK 3
#define SEU_LOCK 2
#define SEC_LOCK 1
#define DBG_LOCK 0

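/* Map each LOCK bit onto the CTRL bits that it write-protects. */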
static const uint32_t lock_ctrl_map[] = {
    [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK,
    [AES_EN_LOCK]   = R_CTRL_PCFG_AES_EN_MASK,
    [SEU_LOCK]      = R_CTRL_SEU_EN_MASK,
    [SEC_LOCK]      = R_CTRL_SEC_EN_MASK,
    [DBG_LOCK]      = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK |
                      R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK |
                      R_CTRL_DAP_EN_MASK,
};

REG32(CFG, 0x08)
    FIELD(CFG, RFIFO_TH, 10, 2)
    FIELD(CFG, WFIFO_TH, 8, 2)
    FIELD(CFG, RCLK_EDGE, 7, 1)
    FIELD(CFG, WCLK_EDGE, 6, 1)
    FIELD(CFG, DISABLE_SRC_INC, 5, 1)
    FIELD(CFG, DISABLE_DST_INC, 4, 1)
#define R_CFG_RESET 0x50B

REG32(INT_STS, 0x0C)
    FIELD(INT_STS, PSS_GTS_USR_B, 31, 1)
    FIELD(INT_STS, PSS_FST_CFG_B, 30, 1)
    FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1)
    FIELD(INT_STS, RX_FIFO_OV, 18, 1)
    FIELD(INT_STS, WR_FIFO_LVL, 17, 1)
    FIELD(INT_STS, RD_FIFO_LVL, 16, 1)
    FIELD(INT_STS, DMA_CMD_ERR, 15, 1)
    FIELD(INT_STS, DMA_Q_OV, 14, 1)
    FIELD(INT_STS, DMA_DONE, 13, 1)
    FIELD(INT_STS, DMA_P_DONE, 12, 1)
    FIELD(INT_STS, P2D_LEN_ERR, 11, 1)
    FIELD(INT_STS, PCFG_DONE, 2, 1)
#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF << 7))

REG32(INT_MASK, 0x10)

REG32(STATUS, 0x14)
    FIELD(STATUS, DMA_CMD_Q_F, 31, 1)
    FIELD(STATUS, DMA_CMD_Q_E, 30, 1)
    FIELD(STATUS, DMA_DONE_CNT, 28, 2)
    FIELD(STATUS, RX_FIFO_LVL, 20, 5)
    FIELD(STATUS, TX_FIFO_LVL, 12, 7)
    FIELD(STATUS, PSS_GTS_USR_B, 11, 1)
    FIELD(STATUS, PSS_FST_CFG_B, 10, 1)
    FIELD(STATUS, PSS_CFG_RESET_B, 5, 1)

REG32(DMA_SRC_ADDR, 0x18)
REG32(DMA_DST_ADDR, 0x1C)
REG32(DMA_SRC_LEN, 0x20)
REG32(DMA_DST_LEN, 0x24)
REG32(ROM_SHADOW, 0x28)
REG32(SW_ID, 0x30)
REG32(UNLOCK, 0x34)

#define R_UNLOCK_MAGIC 0x757BDF0D

REG32(MCTRL, 0x80)
    FIELD(MCTRL, PS_VERSION, 28, 4)
    FIELD(MCTRL, PCFG_POR_B, 8, 1)
    FIELD(MCTRL, INT_PCAP_LPBK, 4, 1)
    FIELD(MCTRL, QEMU, 3, 1)

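/* Drive the interrupt line: asserted while any unmasked INT_STS bit is set. */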
static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s)
{
    qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]);
}

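/* Return every register to the reset value declared in its RegisterAccessInfo. */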
static void xlnx_zynq_devcfg_reset(DeviceState *dev)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev);
    int i;

    for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) {
        register_reset(&s->regs_info[i]);
    }
}

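/*
 * Drain the DMA command FIFO. Each command is processed in bursts of at most
 * BTT_MAX bytes; data is written back to memory only when the internal PCAP
 * loopback is enabled, otherwise the bytes read are simply consumed
 * (programming of the PL is not modelled).
 */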
static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
{
    do {
        uint8_t buf[BTT_MAX];
        XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo;
        uint32_t btt = BTT_MAX;
        bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK;

        btt = MIN(btt, dmah->src_len);
        if (loopback) {
            btt = MIN(btt, dmah->dest_len);
        }
        DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
        dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt);
        dmah->src_len -= btt;
        dmah->src_addr += btt;
        if (loopback && (dmah->src_len || dmah->dest_len)) {
            DB_PRINT("writing %x bytes to %x\n", btt, dmah->dest_addr);
            dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt);
            dmah->dest_len -= btt;
            dmah->dest_addr += btt;
        }
        if (!dmah->src_len && !dmah->dest_len) {
            DB_PRINT("dma operation finished\n");
            s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK |
                                  R_INT_STS_DMA_P_DONE_MASK;
            s->dma_cmd_fifo_num--;
            memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1],
                    sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0]));
        }
        xlnx_zynq_devcfg_update_ixr(s);
    } while (s->dma_cmd_fifo_num);
}

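/* INT_STS or INT_MASK was written: re-evaluate the interrupt line. */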
static void r_ixr_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    xlnx_zynq_devcfg_update_ixr(s);
}

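/* CTRL bits covered by an asserted LOCK bit keep their current value. */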
static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
    int i;

    for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) {
        if (s->regs[R_LOCK] & 1 << i) {
            val &= ~lock_ctrl_map[i];
            val |= lock_ctrl_map[i] & s->regs[R_CTRL];
        }
    }
    return val;
}

static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val)
{
    const char *device_prefix = object_get_typename(OBJECT(reg->opaque));
    uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN);

    if (aes_en != 0 && aes_en != 7) {
        qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent, "
                      "unimplemented security reset should happen!\n",
                      device_prefix);
    }
}

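/*
 * Writing the magic unlock value enables PCAP programming and the register
 * file; writing anything else clears those CTRL bits and disables the
 * register region.
 */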
static void r_unlock_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
    const char *device_prefix = object_get_typename(OBJECT(s));

    if (val == R_UNLOCK_MAGIC) {
        DB_PRINT("successful unlock\n");
        s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK;
        s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK;
        memory_region_set_enabled(&s->iomem, true);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix);
        s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK;
        s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK;
        memory_region_set_enabled(&s->iomem, false);
    }
}

static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    /* Once bits are locked they stay locked. */
    return s->regs[R_LOCK] | val;
}

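/* A write to DMA_DST_LEN queues a new DMA command and kicks off the transfer. */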
static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);

    s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) {
        .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL,
        .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL,
        .src_len = s->regs[R_DMA_SRC_LEN] << 2,
        .dest_len = s->regs[R_DMA_DST_LEN] << 2,
    };
    s->dma_cmd_fifo_num++;
    DB_PRINT("dma transfer started; %d total transfers pending\n",
             s->dma_cmd_fifo_num);
    xlnx_zynq_devcfg_dma_go(s);
}

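/* Access metadata for each register: reset values, reserved/read-only bits and write hooks. */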
static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = {
    { .name = "CTRL", .addr = A_CTRL,
        .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13,
        .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13,
        .pre_write = r_ctrl_pre_write,
        .post_write = r_ctrl_post_write,
    },
    { .name = "LOCK", .addr = A_LOCK,
        .rsvd = MAKE_64BIT_MASK(5, 64 - 5),
        .pre_write = r_lock_pre_write,
    },
    { .name = "CFG", .addr = A_CFG,
        .reset = R_CFG_RESET,
        .rsvd = 0xfffff00f,
    },
    { .name = "INT_STS", .addr = A_INT_STS,
        .w1c = ~R_INT_STS_RSVD,
        .reset = R_INT_STS_PSS_GTS_USR_B_MASK |
                 R_INT_STS_PSS_CFG_RESET_B_MASK |
                 R_INT_STS_WR_FIFO_LVL_MASK,
        .rsvd = R_INT_STS_RSVD,
        .post_write = r_ixr_post_write,
    },
    { .name = "INT_MASK", .addr = A_INT_MASK,
        .reset = ~0,
        .rsvd = R_INT_STS_RSVD,
        .post_write = r_ixr_post_write,
    },
    { .name = "STATUS", .addr = A_STATUS,
        .reset = R_STATUS_DMA_CMD_Q_E_MASK |
                 R_STATUS_PSS_GTS_USR_B_MASK |
                 R_STATUS_PSS_CFG_RESET_B_MASK,
        .ro = ~0,
    },
    { .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, },
    { .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, },
    { .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN,
        .ro = MAKE_64BIT_MASK(27, 64 - 27) },
    { .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN,
        .ro = MAKE_64BIT_MASK(27, 64 - 27),
        .post_write = r_dma_dst_len_post_write,
    },
    { .name = "ROM_SHADOW", .addr = A_ROM_SHADOW,
        .rsvd = ~0ull,
    },
    { .name = "SW_ID", .addr = A_SW_ID, },
    { .name = "UNLOCK", .addr = A_UNLOCK,
        .post_write = r_unlock_post_write,
    },
    { .name = "MCTRL", .addr = R_MCTRL * 4,
        /* Reset value: PS_VERSION of 2, the reserved bit 23, and the
         * QEMU platform identifier bit.
         */
        .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK,
        .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK,
        .rsvd = 0x00f00303,
    },
};

static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = {
    .name = "xlnx_zynq_devcfg_dma_cmd",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_xlnx_zynq_devcfg = {
    .name = "xlnx_zynq_devcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg,
                             XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0,
                             vmstate_xlnx_zynq_devcfg_dma_cmd,
                             XlnxZynqDevcfgDMACmd),
        VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg),
        VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX),
        VMSTATE_END_OF_LIST()
    }
};

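/* Instance init: create the register block inside the MMIO region and export the IRQ. */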
static void xlnx_zynq_devcfg_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj);
    RegisterInfoArray *reg_array;

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4);
    reg_array =
        register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info,
                              ARRAY_SIZE(xlnx_zynq_devcfg_regs_info),
                              s->regs_info, s->regs,
                              &xlnx_zynq_devcfg_reg_ops,
                              XLNX_ZYNQ_DEVCFG_ERR_DEBUG,
                              XLNX_ZYNQ_DEVCFG_R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                A_CTRL,
                                &reg_array->mem);

    sysbus_init_mmio(sbd, &s->iomem);
}

static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = xlnx_zynq_devcfg_reset;
    dc->vmsd = &vmstate_xlnx_zynq_devcfg;
}

static const TypeInfo xlnx_zynq_devcfg_info = {
    .name = TYPE_XLNX_ZYNQ_DEVCFG,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZynqDevcfg),
    .instance_init = xlnx_zynq_devcfg_init,
    .class_init = xlnx_zynq_devcfg_class_init,
};

static void xlnx_zynq_devcfg_register_types(void)
{
    type_register_static(&xlnx_zynq_devcfg_info);
}

type_init(xlnx_zynq_devcfg_register_types)