// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 */
8#include <linux/err.h>
9#include <linux/export.h>
10#include <linux/init.h>
11#include <linux/io.h>
12#include <linux/slab.h>
13#include <linux/syscore_ops.h>
14#include <linux/irqdomain.h>
15#include <linux/irqchip.h>
16#include <linux/irqchip/chained_irq.h>
17#include <linux/interrupt.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20
/* Register offsets within one combiner group's register window */
#define COMBINER_ENABLE_SET 0x0
#define COMBINER_ENABLE_CLEAR 0x4
#define COMBINER_INT_STATUS 0xC

/* Number of interrupt sources multiplexed into each combiner channel */
#define IRQ_IN_COMBINER 8

static DEFINE_SPINLOCK(irq_controller_lock);

/* Per-channel state; four channels share one 32-bit register window. */
struct combiner_chip_data {
	unsigned int hwirq_offset;	/* first hwirq of this channel's 4-channel group */
	unsigned int irq_mask;		/* byte of the group registers owned by this channel */
	void __iomem *base;		/* register window shared by the group */
	unsigned int parent_irq;	/* upstream interrupt this channel cascades from */
#ifdef CONFIG_PM
	u32 pm_save;			/* saved COMBINER_ENABLE_SET state across suspend */
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
/* Channel count; overridden by the DT "samsung,combiner-nr" property. */
static unsigned int max_nr = 20;
42
43static inline void __iomem *combiner_base(struct irq_data *data)
44{
45 struct combiner_chip_data *combiner_data =
46 irq_data_get_irq_chip_data(data);
47
48 return combiner_data->base;
49}
50
51static void combiner_mask_irq(struct irq_data *data)
52{
53 u32 mask = 1 << (data->hwirq % 32);
54
55 writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
56}
57
58static void combiner_unmask_irq(struct irq_data *data)
59{
60 u32 mask = 1 << (data->hwirq % 32);
61
62 writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
63}
64
/*
 * Chained handler for one combiner channel: read the group status,
 * pick the first pending source owned by this channel and dispatch it.
 */
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	/* Keep only the status byte belonging to this channel. */
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* Lowest pending bit -> hwirq -> Linux virq via the domain. */
	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
93
#ifdef CONFIG_SMP
/* Forward affinity requests to the parent interrupt's chip. */
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *parent_chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *parent_data = irq_get_irq_data(chip_data->parent_irq);

	if (!parent_chip || !parent_chip->irq_set_affinity)
		return -EINVAL;

	return parent_chip->irq_set_affinity(parent_data, mask_val, force);
}
#endif
108
/* irq_chip callbacks for interrupts demultiplexed by the combiner. */
static struct irq_chip combiner_chip = {
	.name = "COMBINER",
	.irq_mask = combiner_mask_irq,
	.irq_unmask = combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = combiner_set_affinity,
#endif
};
117
/* Install the demux handler on the channel's upstream (parent) irq. */
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}
124
/* Set up one channel's bookkeeping and mask all of its sources. */
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	/* Channels come in groups of 4; the group's first hwirq. */
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	/* This channel owns one byte of the group's 32-bit registers. */
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts on this channel. */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}
137
138static int combiner_irq_domain_xlate(struct irq_domain *d,
139 struct device_node *controller,
140 const u32 *intspec, unsigned int intsize,
141 unsigned long *out_hwirq,
142 unsigned int *out_type)
143{
144 if (irq_domain_get_of_node(d) != controller)
145 return -EINVAL;
146
147 if (intsize < 2)
148 return -EINVAL;
149
150 *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
151 *out_type = 0;
152
153 return 0;
154}
155
/* Bind a newly-mapped virq to its channel's chip data and handler. */
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	/* 8 sources per channel, so hw >> 3 selects the channel. */
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}
167
/* Domain callbacks: DT specifier translation and virq mapping. */
static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate = combiner_irq_domain_xlate,
	.map = combiner_irq_domain_map,
};
172
173static void __init combiner_init(void __iomem *combiner_base,
174 struct device_node *np)
175{
176 int i, irq;
177 unsigned int nr_irq;
178
179 nr_irq = max_nr * IRQ_IN_COMBINER;
180
181 combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
182 if (!combiner_data) {
183 pr_warn("%s: could not allocate combiner data\n", __func__);
184 return;
185 }
186
187 combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
188 &combiner_irq_domain_ops, combiner_data);
189 if (WARN_ON(!combiner_irq_domain)) {
190 pr_warn("%s: irq domain init failed\n", __func__);
191 return;
192 }
193
194 for (i = 0; i < max_nr; i++) {
195 irq = irq_of_parse_and_map(np, i);
196
197 combiner_init_one(&combiner_data[i], i,
198 combiner_base + (i >> 2) * 0x10, irq);
199 combiner_cascade_irq(&combiner_data[i], irq);
200 }
201}
202
203#ifdef CONFIG_PM
204
205
206
207
208
209
210
211
212static int combiner_suspend(void)
213{
214 int i;
215
216 for (i = 0; i < max_nr; i++)
217 combiner_data[i].pm_save =
218 readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
219
220 return 0;
221}
222
223
224
225
226
227
228
229
230static void combiner_resume(void)
231{
232 int i;
233
234 for (i = 0; i < max_nr; i++) {
235 writel_relaxed(combiner_data[i].irq_mask,
236 combiner_data[i].base + COMBINER_ENABLE_CLEAR);
237 writel_relaxed(combiner_data[i].pm_save,
238 combiner_data[i].base + COMBINER_ENABLE_SET);
239 }
240}
241
#else
/* Without CONFIG_PM there is no state to save or restore. */
#define combiner_suspend NULL
#define combiner_resume NULL
#endif

/* Save/restore combiner enable state across system suspend. */
static struct syscore_ops combiner_syscore_ops = {
	.suspend = combiner_suspend,
	.resume = combiner_resume,
};
251
/*
 * DT probe entry point: map the combiner registers, read the channel
 * count (falling back to the default max_nr) and bring the combiner up.
 */
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	/* Optional property; max_nr keeps its built-in default otherwise. */
	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
/* Register this driver for matching "samsung,exynos4210-combiner" DT nodes. */
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);
277