1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <asm/delay.h>
20#include <asm/uaccess.h>
21#include <linux/module.h>
22#include <linux/seq_file.h>
23#include <linux/interrupt.h>
24#include <linux/kernel_stat.h>
25
26#include <asm/mca.h>
27
28
29
30
31
/*
 * Generic-IRQ-layer hook, called when an interrupt arrives on a vector
 * that has no registered handler.  We only log it; the caller accounts
 * for it (the spurious count is kept elsewhere).
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}
36
37#ifdef CONFIG_IA64_GENERIC
/*
 * Map a Linux irq number to the IA-64 hardware interrupt vector it is
 * currently bound to (per-irq binding kept in irq_cfg[]).
 */
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}
42
/*
 * Reverse mapping: hardware vector -> Linux irq number, using this
 * CPU's per-cpu vector_irq[] table (the binding can differ per CPU).
 * NOTE(review): __get_cpu_var() is the legacy per-cpu accessor; newer
 * kernels use __this_cpu_read() — depends on the tree this targets.
 */
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
47#endif
48
49
50
51
52
53atomic_t irq_err_count;
54
55
56
57
58int arch_show_interrupts(struct seq_file *p, int prec)
59{
60 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
61 return 0;
62}
63
64#ifdef CONFIG_SMP
65static char irq_redir [NR_IRQS];
66
67void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
68{
69 if (irq < NR_IRQS) {
70 cpumask_copy(irq_get_irq_data(irq)->affinity,
71 cpumask_of(cpu_logical_id(hwid)));
72 irq_redir[irq] = (char) (redir & 0xff);
73 }
74}
75
76bool is_affinity_mask_valid(const struct cpumask *cpumask)
77{
78 if (ia64_platform_is("sn2")) {
79
80 if (cpumask_weight(cpumask) != 1)
81 return false;
82 }
83 return true;
84}
85
86#endif
87
/*
 * Early arch IRQ setup hook from the generic IRQ core: initialize the
 * MCA (Machine Check Architecture) interrupts.  Always returns 0.
 */
int __init arch_early_irq_init(void)
{
	ia64_mca_irq_init();
	return 0;
}
93
94#ifdef CONFIG_HOTPLUG_CPU
95unsigned int vectors_in_migration[NR_IRQS];
96
97
98
99
100
/*
 * Retarget interrupts away from a CPU that is going offline.
 * For every irq whose affinity no longer intersects the online mask,
 * pick any online CPU and rebind the irq there via its irq_chip
 * (disable -> set_affinity -> enable).  Each such irq is also flagged
 * in vectors_in_migration[] so fixup_irqs() can replay it afterwards.
 * Called with the dying CPU already removed from cpu_online_mask.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		/* Nothing to migrate for an irq that is not in use. */
		if (irqd_irq_disabled(data))
			continue;

		/*
		 * Per-CPU interrupts cannot be moved to another CPU;
		 * skip them.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		/* Does this irq still target at least one online CPU? */
		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Flag it for the replay pass in fixup_irqs().
			 * NOTE(review): storing the irq number as the flag
			 * means irq 0 can never be flagged (0 is also the
			 * "not set" value) — presumably harmless here, but
			 * worth confirming irq 0 never reaches this path.
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * Rebind with the irq masked so no interrupt is
			 * delivered mid-retarget.  All three chip callbacks
			 * are required; warn if any is missing.
			 */
			if (chip && chip->irq_disable &&
				chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					!chip->irq_enable ||
					!chip->irq_set_affinity));
			}
		}
	}
}
148
/*
 * CPU-offline fixup: hand this CPU's interrupt duties over to the
 * remaining online CPUs.  Runs on the CPU that is going down, after it
 * has been cleared from cpu_online_mask.
 */
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/*
	 * Mask the local timer interrupt (ITV) on this dying CPU.
	 * NOTE(review): presumably bit 16 is the ITV mask bit — confirm
	 * against the IA-64 architecture manual.
	 */
	ia64_set_itv(1 << 16);

	/*
	 * If this CPU was the timekeeper, promote the first remaining
	 * online CPU to take over timekeeping.
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: retarget every irq whose affinity no longer covers an
	 * online CPU; such irqs are flagged in vectors_in_migration[].
	 */
	migrate_irqs();

	/*
	 * Drain any interrupts already pending on this CPU before it
	 * stops servicing them.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 2: replay every irq flagged during migration by invoking
	 * its handler directly, so no edge-triggered event is lost while
	 * the irq was being moved.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Raise the external task priority (XTP) so the platform stops
	 * preferring this CPU for external interrupts, then shut local
	 * interrupt delivery off for good.
	 */
	max_xtp();
	local_irq_disable();
}
202#endif
203