1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/list.h>
28#include <linux/smp.h>
29#include <linux/cpumask.h>
30#include <linux/io.h>
31
32#include <asm/irq.h>
33#include <asm/mach/irq.h>
34#include <asm/hardware/gic.h>
35
/* Serialises all distributor / CPU-interface register accesses. */
static DEFINE_SPINLOCK(irq_controller_lock);

/* Per-controller state shared by the irq_chip callbacks below. */
struct gic_chip_data {
	unsigned int irq_offset;	/* Linux IRQ base for this GIC; set to (irq_start - 1) & ~31 in gic_dist_init() */
	void __iomem *dist_base;	/* mapped distributor register bank */
	void __iomem *cpu_base;		/* mapped CPU interface register bank */
};

/* Platforms with several cascaded GICs override MAX_GIC_NR before including this file's header. */
#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR];
49
50static inline void __iomem *gic_dist_base(unsigned int irq)
51{
52 struct gic_chip_data *gic_data = get_irq_chip_data(irq);
53 return gic_data->dist_base;
54}
55
56static inline void __iomem *gic_cpu_base(unsigned int irq)
57{
58 struct gic_chip_data *gic_data = get_irq_chip_data(irq);
59 return gic_data->cpu_base;
60}
61
62static inline unsigned int gic_irq(unsigned int irq)
63{
64 struct gic_chip_data *gic_data = get_irq_chip_data(irq);
65 return irq - gic_data->irq_offset;
66}
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83static void gic_ack_irq(unsigned int irq)
84{
85 u32 mask = 1 << (irq % 32);
86
87 spin_lock(&irq_controller_lock);
88 writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
89 writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
90 spin_unlock(&irq_controller_lock);
91}
92
93static void gic_mask_irq(unsigned int irq)
94{
95 u32 mask = 1 << (irq % 32);
96
97 spin_lock(&irq_controller_lock);
98 writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
99 spin_unlock(&irq_controller_lock);
100}
101
102static void gic_unmask_irq(unsigned int irq)
103{
104 u32 mask = 1 << (irq % 32);
105
106 spin_lock(&irq_controller_lock);
107 writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
108 spin_unlock(&irq_controller_lock);
109}
110
#ifdef CONFIG_SMP
/*
 * Route @irq to the first CPU in @mask_val by rewriting the
 * interrupt's byte lane in the distributor's target register.  The
 * byte-lane shift is now derived from the GIC-relative ID, matching
 * the register-offset computation on the line above (identical to
 * irq % 4 because irq_offset is 32-bit aligned).
 *
 * NOTE(review): the target byte is set to "1 << cpu", i.e. the
 * logical CPU number is assumed to equal the GIC CPU interface
 * number — confirm this holds on all supported platforms.
 */
static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
{
	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	unsigned int shift = (gic_irq(irq) % 4) * 8;
	unsigned int cpu = cpumask_first(mask_val);
	u32 val;

	spin_lock(&irq_controller_lock);
	irq_desc[irq].node = cpu;
	val = readl(reg) & ~(0xff << shift);
	val |= 1 << (cpu + shift);
	writel(val, reg);
	spin_unlock(&irq_controller_lock);

	return 0;
}
#endif
129
130static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
131{
132 struct gic_chip_data *chip_data = get_irq_data(irq);
133 struct irq_chip *chip = get_irq_chip(irq);
134 unsigned int cascade_irq, gic_irq;
135 unsigned long status;
136
137
138 chip->ack(irq);
139
140 spin_lock(&irq_controller_lock);
141 status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
142 spin_unlock(&irq_controller_lock);
143
144 gic_irq = (status & 0x3ff);
145 if (gic_irq == 1023)
146 goto out;
147
148 cascade_irq = gic_irq + chip_data->irq_offset;
149 if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
150 do_bad_IRQ(cascade_irq, desc);
151 else
152 generic_handle_irq(cascade_irq);
153
154 out:
155
156 chip->unmask(irq);
157}
158
/* irq_chip callbacks installed on every GIC-owned IRQ by gic_dist_init(). */
static struct irq_chip gic_chip = {
	.name		= "GIC",
	.ack		= gic_ack_irq,
	.mask		= gic_mask_irq,
	.unmask		= gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity	= gic_set_cpu,
#endif
};
168
169void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
170{
171 if (gic_nr >= MAX_GIC_NR)
172 BUG();
173 if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
174 BUG();
175 set_irq_chained_handler(irq, gic_handle_cascade_irq);
176}
177
/*
 * One-time initialisation of GIC @gic_nr's distributor at @base.
 * @irq_start is the first Linux IRQ number this GIC serves; the
 * stored irq_offset is rounded down so GIC ID 32 maps to a 32-aligned
 * Linux IRQ.  Runs on the boot CPU with the distributor disabled for
 * the duration of the setup.
 */
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
			  unsigned int irq_start)
{
	unsigned int max_irq, i;
	u32 cpumask = 1 << smp_processor_id();

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	/* replicate the current CPU's bit into all four target byte lanes */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	/* disable the distributor while we reprogram it */
	writel(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported: the ITLinesNumber
	 * field of GIC_DIST_CTR encodes (lines/32 - 1).
	 */
	max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC architecture supports at most 1020 interrupt sources.
	 * NOTE(review): this clamps to max(1020, NR_IRQS) — if NR_IRQS
	 * is smaller than 1020 the set_irq_chip() loop below can still
	 * run past NR_IRQS; confirm whether min() was intended here.
	 */
	if (max_irq > max(1020, NR_IRQS))
		max_irq = max(1020, NR_IRQS);

	/* set all global (SPI) interrupts to be level triggered, active low */
	for (i = 32; i < max_irq; i += 16)
		writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/* point all global interrupts at this (boot) CPU */
	for (i = 32; i < max_irq; i += 4)
		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/* give every interrupt the same default priority (0xa0) */
	for (i = 0; i < max_irq; i += 4)
		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/* start with every interrupt disabled */
	for (i = 0; i < max_irq; i += 32)
		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/* register the Linux-side chip, data and level handler for each IRQ */
	for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, &gic_data[gic_nr]);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* re-enable the distributor */
	writel(1, base + GIC_DIST_CTRL);
}
245
246void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
247{
248 if (gic_nr >= MAX_GIC_NR)
249 BUG();
250
251 gic_data[gic_nr].cpu_base = base;
252
253 writel(0xf0, base + GIC_CPU_PRIMASK);
254 writel(1, base + GIC_CPU_CTRL);
255}
256
#ifdef CONFIG_SMP
/*
 * Send software-generated interrupt @irq to every CPU in @mask.
 * The target CPU mask goes in bits [23:16] of GIC_DIST_SOFTINT, so
 * only the first 8 CPUs are addressable this way.
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long map = *cpus_addr(*mask);	/* first word of the cpumask */

	/* this always happens on GIC0 */
	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
266