/*
 * QEMU Sparc SLAVIO interrupt controller emulation (sun4m).
 */
#include "hw/sparc/sun4m.h"
#include "monitor/monitor.h"
#include "hw/sysbus.h"
#include "trace.h"

/* Define to collect per-level interrupt statistics (see slavio_irq_info) */
/* #define DEBUG_IRQ_COUNT */

/*
 * The SLAVIO (sun4m) interrupt controller provides one master (system)
 * register set plus one slave register set per CPU; system interrupts are
 * dispatched to the currently selected target CPU.
 */

#define MAX_CPUS 16
#define MAX_PILS 16

struct SLAVIO_INTCTLState;

typedef struct SLAVIO_CPUINTCTLState {
    MemoryRegion iomem;
    struct SLAVIO_INTCTLState *master;
    uint32_t intreg_pending;
    uint32_t cpu;
    uint32_t irl_out;
} SLAVIO_CPUINTCTLState;

#define TYPE_SLAVIO_INTCTL "slavio_intctl"
#define SLAVIO_INTCTL(obj) \
    OBJECT_CHECK(SLAVIO_INTCTLState, (obj), TYPE_SLAVIO_INTCTL)

typedef struct SLAVIO_INTCTLState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
#ifdef DEBUG_IRQ_COUNT
    uint64_t irq_count[32];
#endif
    qemu_irq cpu_irqs[MAX_CPUS][MAX_PILS];
    SLAVIO_CPUINTCTLState slaves[MAX_CPUS];
    uint32_t intregm_pending;
    uint32_t intregm_disabled;
    uint32_t target_cpu;
} SLAVIO_INTCTLState;

#define INTCTL_MAXADDR 0xf
#define INTCTL_SIZE (INTCTL_MAXADDR + 1)
#define INTCTLM_SIZE 0x14
#define MASTER_IRQ_MASK ~0x0fa2007f
#define MASTER_DISABLE 0x80000000
#define CPU_SOFTIRQ_MASK 0xfffe0000
#define CPU_IRQ_INT15_IN (1 << 15)
#define CPU_IRQ_TIMER_IN (1 << 14)
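
/*
 * MASTER_DISABLE (bit 31 of the master mask register) gates delivery of
 * hard interrupts and of the level-15 and timer inputs; software
 * interrupts are not affected by it.  In the per-CPU pending register,
 * bits 17..31 (CPU_SOFTIRQ_MASK) hold software interrupts for PILs 1..15,
 * bit 15 is the broadcast level-15 interrupt and bit 14 the per-CPU timer
 * interrupt.
 */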
static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs);

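/* Per-CPU (slave) interrupt controller register accessors */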
static uint64_t slavio_intctl_mem_readl(void *opaque, hwaddr addr,
                                        unsigned size)
{
    SLAVIO_CPUINTCTLState *s = opaque;
    uint32_t saddr, ret;

    saddr = addr >> 2;
    switch (saddr) {
    case 0:
        /* pending register */
        ret = s->intreg_pending;
        break;
    default:
        ret = 0;
        break;
    }
    trace_slavio_intctl_mem_readl(s->cpu, addr, ret);

    return ret;
}

static void slavio_intctl_mem_writel(void *opaque, hwaddr addr,
                                     uint64_t val, unsigned size)
{
    SLAVIO_CPUINTCTLState *s = opaque;
    uint32_t saddr;

    saddr = addr >> 2;
    trace_slavio_intctl_mem_writel(s->cpu, addr, val);
    switch (saddr) {
    case 1:
        /* clear (acknowledge) pending soft and level-15 interrupts */
        val &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN;
        s->intreg_pending &= ~val;
        slavio_check_interrupts(s->master, 1);
        trace_slavio_intctl_mem_writel_clear(s->cpu, val, s->intreg_pending);
        break;
    case 2:
        /* set (post) software interrupts */
        val &= CPU_SOFTIRQ_MASK;
        s->intreg_pending |= val;
        slavio_check_interrupts(s->master, 1);
        trace_slavio_intctl_mem_writel_set(s->cpu, val, s->intreg_pending);
        break;
    default:
        break;
    }
}

static const MemoryRegionOps slavio_intctl_mem_ops = {
    .read = slavio_intctl_mem_readl,
    .write = slavio_intctl_mem_writel,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

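/* Master (system) interrupt controller register accessors */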
static uint64_t slavio_intctlm_mem_readl(void *opaque, hwaddr addr,
                                         unsigned size)
{
    SLAVIO_INTCTLState *s = opaque;
    uint32_t saddr, ret;

    saddr = addr >> 2;
    switch (saddr) {
    case 0:
        ret = s->intregm_pending & ~MASTER_DISABLE;
        break;
    case 1:
        ret = s->intregm_disabled & MASTER_IRQ_MASK;
        break;
    case 4:
        ret = s->target_cpu;
        break;
    default:
        ret = 0;
        break;
    }
    trace_slavio_intctlm_mem_readl(addr, ret);

    return ret;
}

static void slavio_intctlm_mem_writel(void *opaque, hwaddr addr,
                                      uint64_t val, unsigned size)
{
    SLAVIO_INTCTLState *s = opaque;
    uint32_t saddr;

    saddr = addr >> 2;
    trace_slavio_intctlm_mem_writel(addr, val);
    switch (saddr) {
    case 2:
        /* enable interrupts: clear bits in the disable mask */
        val &= MASTER_IRQ_MASK;
        s->intregm_disabled &= ~val;
        trace_slavio_intctlm_mem_writel_enable(val, s->intregm_disabled);
        slavio_check_interrupts(s, 1);
        break;
    case 3:
        /* disable interrupts: set bits in the disable mask */
        val &= MASTER_IRQ_MASK;
        s->intregm_disabled |= val;
        slavio_check_interrupts(s, 1);
        trace_slavio_intctlm_mem_writel_disable(val, s->intregm_disabled);
        break;
    case 4:
        /* select the CPU that receives system interrupts */
        s->target_cpu = val & (MAX_CPUS - 1);
        slavio_check_interrupts(s, 1);
        trace_slavio_intctlm_mem_writel_target(s->target_cpu);
        break;
    default:
        break;
    }
}

static const MemoryRegionOps slavio_intctlm_mem_ops = {
    .read = slavio_intctlm_mem_readl,
    .write = slavio_intctlm_mem_writel,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

void slavio_pic_info(Monitor *mon, DeviceState *dev)
{
    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
    int i;

    for (i = 0; i < MAX_CPUS; i++) {
        monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i,
                       s->slaves[i].intreg_pending);
    }
    monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n",
                   s->intregm_pending, s->intregm_disabled);
}

void slavio_irq_info(Monitor *mon, DeviceState *dev)
{
#ifndef DEBUG_IRQ_COUNT
    monitor_printf(mon, "irq statistic code not compiled.\n");
#else
    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
    int i;
    int64_t count;

    monitor_printf(mon, "IRQ statistics:\n");
    for (i = 0; i < 32; i++) {
        count = s->irq_count[i];
        if (count > 0) {
            monitor_printf(mon, "%2d: %" PRId64 "\n", i, count);
        }
    }
#endif
}

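/*
 * Map each bit of the master (system) interrupt register to the SPARC
 * processor interrupt level (PIL) it raises; 0 means the bit does not
 * raise a processor interrupt.
 */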
static const uint32_t intbit_to_level[] = {
    2, 3, 5, 7, 9, 11, 13, 2, 3, 5, 7, 9, 11, 13, 12, 12,
    6, 13, 4, 10, 8, 9, 11, 0, 0, 0, 0, 15, 15, 15, 15, 0,
};

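/*
 * Recompute each CPU's pending PIL set from the master pending/disabled
 * state, the target CPU selection and the per-CPU soft, level-15 and timer
 * bits.  When set_irqs is non-zero, raise or lower the CPU interrupt lines
 * whose state changed since the last call.
 */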
static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs)
{
    uint32_t pending = s->intregm_pending, pil_pending;
    unsigned int i, j;

    pending &= ~s->intregm_disabled;

    trace_slavio_check_interrupts(pending, s->intregm_disabled);
    for (i = 0; i < MAX_CPUS; i++) {
        pil_pending = 0;

        /* If this CPU is the interrupt target and the master is enabled,
           gather the pending, non-disabled hard interrupts */
        if (pending && !(s->intregm_disabled & MASTER_DISABLE) &&
            (i == s->target_cpu)) {
            for (j = 0; j < 32; j++) {
                if ((pending & (1 << j)) && intbit_to_level[j]) {
                    pil_pending |= 1 << intbit_to_level[j];
                }
            }
        }

        /* Rebuild the per-CPU pending register: keep only the soft,
           level-15 and timer bits, then mirror the pending hard interrupts
           for the target CPU so the guest can read them back */
        s->slaves[i].intreg_pending &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN |
            CPU_IRQ_TIMER_IN;
        if (i == s->target_cpu) {
            for (j = 0; j < 32; j++) {
                if ((s->intregm_pending & (1 << j)) && intbit_to_level[j]) {
                    s->slaves[i].intreg_pending |= 1 << intbit_to_level[j];
                }
            }
        }

        /* Level-15 and timer interrupts are only masked by MASTER_DISABLE */
        if (!(s->intregm_disabled & MASTER_DISABLE)) {
            pil_pending |= s->slaves[i].intreg_pending &
                (CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN);
        }

        /* Add software interrupts: bits 17..31 map to PILs 1..15 */
        pil_pending |= (s->slaves[i].intreg_pending & CPU_SOFTIRQ_MASK) >> 16;

        if (set_irqs) {
            /* PIL 0 is never used (bit 0 of pil_pending and irl_out stays
               clear), so only levels 15..1 are walked; a CPU line is only
               raised or lowered when its state actually changed */
            for (j = MAX_PILS - 1; j > 0; j--) {
                if (pil_pending & (1 << j)) {
                    if (!(s->slaves[i].irl_out & (1 << j))) {
                        qemu_irq_raise(s->cpu_irqs[i][j]);
                    }
                } else {
                    if (s->slaves[i].irl_out & (1 << j)) {
                        qemu_irq_lower(s->cpu_irqs[i][j]);
                    }
                }
            }
        }
        s->slaves[i].irl_out = pil_pending;
    }
}

/*
 * GPIO input handler for the 32 system interrupts: "irq" is the bit number
 * in the master interrupt register, "level" asserts or deasserts it.
 * Level-15 interrupts are broadcast to every CPU's pending register.
 */
static void slavio_set_irq(void *opaque, int irq, int level)
{
    SLAVIO_INTCTLState *s = opaque;
    uint32_t mask = 1 << irq;
    uint32_t pil = intbit_to_level[irq];
    unsigned int i;

    trace_slavio_set_irq(s->target_cpu, irq, pil, level);
    if (pil > 0) {
        if (level) {
#ifdef DEBUG_IRQ_COUNT
            s->irq_count[pil]++;
#endif
            s->intregm_pending |= mask;
            if (pil == 15) {
                for (i = 0; i < MAX_CPUS; i++) {
                    s->slaves[i].intreg_pending |= 1 << pil;
                }
            }
        } else {
            s->intregm_pending &= ~mask;
            if (pil == 15) {
                for (i = 0; i < MAX_CPUS; i++) {
                    s->slaves[i].intreg_pending &= ~(1 << pil);
                }
            }
        }
        slavio_check_interrupts(s, 1);
    }
}

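/* Timer device callback: assert or deassert the given CPU's timer interrupt */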
static void slavio_set_timer_irq_cpu(void *opaque, int cpu, int level)
{
    SLAVIO_INTCTLState *s = opaque;

    trace_slavio_set_timer_irq_cpu(cpu, level);

    if (level) {
        s->slaves[cpu].intreg_pending |= CPU_IRQ_TIMER_IN;
    } else {
        s->slaves[cpu].intreg_pending &= ~CPU_IRQ_TIMER_IN;
    }

    slavio_check_interrupts(s, 1);
}

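/*
 * Combined GPIO input handler: inputs 0..31 are the system interrupts,
 * inputs 32..(32 + MAX_CPUS - 1) are the per-CPU timer interrupt lines.
 */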
static void slavio_set_irq_all(void *opaque, int irq, int level)
{
    if (irq < 32) {
        slavio_set_irq(opaque, irq, level);
    } else {
        slavio_set_timer_irq_cpu(opaque, irq - 32, level);
    }
}

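/* Recompute the derived per-CPU irl_out state after migration; the IRQ
   lines themselves are not toggled (set_irqs == 0) */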
static int vmstate_intctl_post_load(void *opaque, int version_id)
{
    SLAVIO_INTCTLState *s = opaque;

    slavio_check_interrupts(s, 0);
    return 0;
}

static const VMStateDescription vmstate_intctl_cpu = {
    .name = "slavio_intctl_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_intctl = {
    .name = "slavio_intctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = vmstate_intctl_post_load,
    .fields = (VMStateField []) {
        VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1,
                             vmstate_intctl_cpu, SLAVIO_CPUINTCTLState),
        VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState),
        VMSTATE_UINT32(intregm_disabled, SLAVIO_INTCTLState),
        VMSTATE_UINT32(target_cpu, SLAVIO_INTCTLState),
        VMSTATE_END_OF_LIST()
    }
};

static void slavio_intctl_reset(DeviceState *d)
{
    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(d);
    int i;

    for (i = 0; i < MAX_CPUS; i++) {
        s->slaves[i].intreg_pending = 0;
        s->slaves[i].irl_out = 0;
    }
    s->intregm_disabled = ~MASTER_IRQ_MASK;
    s->intregm_pending = 0;
    s->target_cpu = 0;
    slavio_check_interrupts(s, 0);
}

static int slavio_intctl_init1(SysBusDevice *sbd)
{
    DeviceState *dev = DEVICE(sbd);
    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
    unsigned int i, j;
    char slave_name[45];

    qdev_init_gpio_in(dev, slavio_set_irq_all, 32 + MAX_CPUS);
    memory_region_init_io(&s->iomem, OBJECT(s), &slavio_intctlm_mem_ops, s,
                          "master-interrupt-controller", INTCTLM_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);

    for (i = 0; i < MAX_CPUS; i++) {
        snprintf(slave_name, sizeof(slave_name),
                 "slave-interrupt-controller-%i", i);
        for (j = 0; j < MAX_PILS; j++) {
            sysbus_init_irq(sbd, &s->cpu_irqs[i][j]);
        }
        memory_region_init_io(&s->slaves[i].iomem, OBJECT(s),
                              &slavio_intctl_mem_ops,
                              &s->slaves[i], slave_name, INTCTL_SIZE);
        sysbus_init_mmio(sbd, &s->slaves[i].iomem);
        s->slaves[i].cpu = i;
        s->slaves[i].master = s;
    }

    return 0;
}

static void slavio_intctl_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = slavio_intctl_init1;
    dc->reset = slavio_intctl_reset;
    dc->vmsd = &vmstate_intctl;
}

static const TypeInfo slavio_intctl_info = {
    .name = TYPE_SLAVIO_INTCTL,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SLAVIO_INTCTLState),
    .class_init = slavio_intctl_class_init,
};

static void slavio_intctl_register_types(void)
{
    type_register_static(&slavio_intctl_info);
}

type_init(slavio_intctl_register_types)