#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "hw/sh4/sh_intc.h"
#include "hw/irq.h"
#include "hw/sh4/sh.h"
#include "trace.h"

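/*
 * Adjust the enable count and/or assertion state of an interrupt source and
 * update the parent controller's pending count, raising or lowering
 * CPU_INTERRUPT_HARD on the first CPU as that count crosses zero.
 */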
void sh_intc_toggle_source(struct intc_source *source,
                           int enable_adj, int assert_adj)
{
    int enable_changed = 0;
    int pending_changed = 0;
    int old_pending;

    if (source->enable_count == source->enable_max && enable_adj == -1) {
        enable_changed = -1;
    }
    source->enable_count += enable_adj;

    if (source->enable_count == source->enable_max) {
        enable_changed = 1;
    }
    source->asserted += assert_adj;

    old_pending = source->pending;
    source->pending = source->asserted &&
        (source->enable_count == source->enable_max);

    if (old_pending != source->pending) {
        pending_changed = 1;
    }
    if (pending_changed) {
        if (source->pending) {
            source->parent->pending++;
            if (source->parent->pending == 1) {
                cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        } else {
            source->parent->pending--;
            if (source->parent->pending == 0) {
                cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        }
    }

    if (enable_changed || assert_adj || pending_changed) {
        trace_sh_intc_sources(source->parent->pending, source->asserted,
                              source->enable_count, source->enable_max,
                              source->vect, source->asserted ? "asserted " :
                              assert_adj ? "deasserted" : "",
                              enable_changed == 1 ? "enabled " :
                              enable_changed == -1 ? "disabled " : "",
                              source->pending ? "pending" : "");
    }
}

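/*
 * qemu_irq handler: translate a level change on input line n into an assert
 * or deassert of the corresponding interrupt source.
 */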
static void sh_intc_set_irq(void *opaque, int n, int level)
{
    struct intc_desc *desc = opaque;
    struct intc_source *source = &desc->sources[n];

    if (level && !source->asserted) {
        sh_intc_toggle_source(source, 0, 1);
    } else if (!level && source->asserted) {
        sh_intc_toggle_source(source, 0, -1);
    }
}

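/*
 * Return the vector number of a pending interrupt source, or -1 when the
 * CPU interrupt mask (imask == 0x0f) blocks all interrupts.  Callers are
 * expected to check that the controller has a pending source first; the
 * function asserts if none is found.
 */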
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask)
{
    unsigned int i;

    if (imask == 0x0f) {
        return -1;
    }

    for (i = 0; i < desc->nr_sources; i++) {
        struct intc_source *source = &desc->sources[i];

        if (source->pending) {
            trace_sh_intc_pending(desc->pending, source->vect);
            return source->vect;
        }
    }
    g_assert_not_reached();
}

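/*
 * How a register access is interpreted: a set/clear register pair
 * (DUAL_SET/DUAL_CLR), a single enable register, or a single mask register.
 * INTC_MODE_IS_PRIO is ORed in when the register holds priority fields
 * rather than single mask bits.
 */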
typedef enum {
    INTC_MODE_NONE,
    INTC_MODE_DUAL_SET,
    INTC_MODE_DUAL_CLR,
    INTC_MODE_ENABLE_REG,
    INTC_MODE_MASK_REG,
} SHIntCMode;
#define INTC_MODE_IS_PRIO 0x80

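/*
 * Classify an access at 'address' against a register's set and clear
 * addresses (compared in A7 space) and return the matching SHIntCMode.
 */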
static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg,
                               unsigned long clr_reg)
{
    if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) {
        return INTC_MODE_NONE;
    }
    if (set_reg && clr_reg) {
        return address == A7ADDR(set_reg) ?
               INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR;
    }
    return set_reg ? INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG;
}

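/*
 * Linear search of the mask and priority register descriptions for the one
 * that maps to 'address'.  On a match, return pointers to the register's
 * cached value and enum_ids table, the index of the highest field, the
 * field width in bits and the access mode.  Asserts if nothing matches.
 */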
static void sh_intc_locate(struct intc_desc *desc,
                           unsigned long address,
                           unsigned long **datap,
                           intc_enum **enums,
                           unsigned int *first,
                           unsigned int *width,
                           unsigned int *modep)
{
    SHIntCMode mode;
    unsigned int i;

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg);
            if (mode != INTC_MODE_NONE) {
                *modep = mode;
                *datap = &mr->value;
                *enums = mr->enum_ids;
                *first = mr->reg_width - 1;
                *width = 1;
                return;
            }
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg);
            if (mode != INTC_MODE_NONE) {
                *modep = mode | INTC_MODE_IS_PRIO;
                *datap = &pr->value;
                *enums = pr->enum_ids;
                *first = pr->reg_width / pr->field_width - 1;
                *width = pr->field_width;
                return;
            }
        }
    }
    g_assert_not_reached();
}

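/*
 * Enable or disable the source identified by 'id'.  Sources that carry a
 * vector are toggled directly; group entries are followed through their
 * next_enum_id chain so that every member of the group is updated.
 */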
static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id,
                                int enable, int is_group)
{
    struct intc_source *source = &desc->sources[id];

    if (!id) {
        return;
    }
    if (!source->next_enum_id && (!source->enable_max || !source->vect)) {
        qemu_log_mask(LOG_UNIMP,
                      "sh_intc: reserved interrupt source %d modified\n", id);
        return;
    }

    if (source->vect) {
        sh_intc_toggle_source(source, enable ? 1 : -1, 0);
    }

    if ((is_group || !source->vect) && source->next_enum_id) {
        sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1);
    }

    if (!source->vect) {
        trace_sh_intc_set(id, !!enable);
    }
}

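/*
 * MMIO read handler: locate the register that backs 'offset' and return its
 * cached value.
 */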
static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids;
    unsigned int first;
    unsigned int width;
    unsigned int mode;
    unsigned long *valuep;

    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    trace_sh_intc_read(size, (uint64_t)offset, *valuep);
    return *valuep;
}

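/*
 * MMIO write handler: merge the written value according to the register's
 * access mode (OR bits in for DUAL_SET, clear bits for DUAL_CLR, store
 * as-is for a priority enable register), then toggle the mask state of
 * every field whose value changed and commit the result.
 */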
static void sh_intc_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids;
    unsigned int first;
    unsigned int width;
    unsigned int mode;
    unsigned long *valuep;
    unsigned int k;
    unsigned long mask;

    trace_sh_intc_write(size, (uint64_t)offset, value);
    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    switch (mode) {
    case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO:
        break;
    case INTC_MODE_DUAL_SET:
        value |= *valuep;
        break;
    case INTC_MODE_DUAL_CLR:
        value = *valuep & ~value;
        break;
    default:
        g_assert_not_reached();
    }

    for (k = 0; k <= first; k++) {
        mask = (1 << width) - 1;
        mask <<= (first - k) * width;

        if ((*valuep & mask) != (value & mask)) {
            sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0);
        }
    }

    *valuep = value;
}

static const MemoryRegionOps sh_intc_ops = {
    .read = sh_intc_read,
    .write = sh_intc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

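/*
 * Count how many mask, priority and group register slots refer to 'source'
 * and accumulate that count in the source's enable_max.
 */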
static void sh_intc_register_source(struct intc_desc *desc,
                                    intc_enum source,
                                    struct intc_group *groups,
                                    int nr_groups)
{
    unsigned int i, k;
    intc_enum id;

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) {
                id = mr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) {
                id = pr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }

    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = &groups[i];

            for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) {
                id = gr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }
}

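/*
 * Register the vectors and groups declared by the board: compute enable_max
 * for each source, record its vector number, and link group members into a
 * next_enum_id chain headed by the group's own enum id.
 */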
void sh_intc_register_sources(struct intc_desc *desc,
                              struct intc_vect *vectors,
                              int nr_vectors,
                              struct intc_group *groups,
                              int nr_groups)
{
    unsigned int i, k;
    intc_enum id;
    struct intc_source *s;

    for (i = 0; i < nr_vectors; i++) {
        struct intc_vect *vect = &vectors[i];

        sh_intc_register_source(desc, vect->enum_id, groups, nr_groups);
        id = vect->enum_id;
        if (id) {
            s = &desc->sources[id];
            s->vect = vect->vect;
            trace_sh_intc_register("source", vect->enum_id, s->vect,
                                   s->enable_count, s->enable_max);
        }
    }

    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = &groups[i];

            id = gr->enum_id;
            s = &desc->sources[id];
            s->next_enum_id = gr->enum_ids[0];

            for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) {
                if (gr->enum_ids[k]) {
                    id = gr->enum_ids[k - 1];
                    s = &desc->sources[id];
                    s->next_enum_id = gr->enum_ids[k];
                }
            }
            trace_sh_intc_register("group", gr->enum_id, 0xffff,
                                   s->enable_count, s->enable_max);
        }
    }
}

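/*
 * Map one controller register into the system memory map.  Each register is
 * exposed twice, as a P4 alias and as an A7 alias of the main I/O region;
 * the return value (0 or 2) tells the caller how far to advance its index
 * into desc->iomem_aliases.
 */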
static unsigned int sh_intc_register(MemoryRegion *sysmem,
                                     struct intc_desc *desc,
                                     const unsigned long address,
                                     const char *type,
                                     const char *action,
                                     const unsigned int index)
{
    char name[60];
    MemoryRegion *iomem, *iomem_p4, *iomem_a7;

    if (!address) {
        return 0;
    }

    iomem = &desc->iomem;
    iomem_p4 = &desc->iomem_aliases[index];
    iomem_a7 = iomem_p4 + 1;

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4");
    memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4);

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7");
    memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7);

    /* two alias slots (P4 and A7) were consumed from iomem_aliases */
    return 2;
}

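/*
 * Initialise the controller state, allocate the source array and qemu_irq
 * inputs, create the main I/O region, and register P4/A7 aliases for every
 * declared mask and priority register.
 */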
int sh_intc_init(MemoryRegion *sysmem,
                 struct intc_desc *desc,
                 int nr_sources,
                 struct intc_mask_reg *mask_regs,
                 int nr_mask_regs,
                 struct intc_prio_reg *prio_regs,
                 int nr_prio_regs)
{
    unsigned int i, j;

    desc->pending = 0;
    desc->nr_sources = nr_sources;
    desc->mask_regs = mask_regs;
    desc->nr_mask_regs = nr_mask_regs;
    desc->prio_regs = prio_regs;
    desc->nr_prio_regs = nr_prio_regs;

    desc->iomem_aliases = g_new0(MemoryRegion,
                                 (nr_mask_regs + nr_prio_regs) * 4);
    desc->sources = g_new0(struct intc_source, nr_sources);
    for (i = 0; i < nr_sources; i++) {
        desc->sources[i].parent = desc;
    }
    desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources);
    memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc",
                          0x100000000ULL);
    j = 0;
    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j);
            j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j);
        }
    }

    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j);
            j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j);
        }
    }

    return 0;
}
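/*
 * qemu_irq handler for the external IRL (interrupt request level) input.
 * Walk the chain of IRL sources linked through next_enum_id, assert the
 * source selected by the encoded level (level ^ 15) and deassert all other
 * asserted sources; a level that maps past the end of the chain deasserts
 * everything.
 */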
void sh_intc_set_irl(void *opaque, int n, int level)
{
    struct intc_source *s = opaque;
    int i, irl = level ^ 15;
    intc_enum id = s->next_enum_id;

    for (i = 0; id; id = s->next_enum_id, i++) {
        s = &s->parent->sources[id];
        if (i == irl) {
            sh_intc_toggle_source(s, s->enable_count ? 0 : 1,
                                  s->asserted ? 0 : 1);
        } else if (s->asserted) {
            sh_intc_toggle_source(s, 0, -1);
        }
    }
}