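// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * All Rights Reserved.
 */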
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"
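
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */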
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
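	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity although the
	 * architecture has enabled the config switch. Fall back to the
	 * general affinity mask in that case.
	 */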
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);
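
	/*
	 * Sanity check: if the mask is not empty when excluding the
	 * outgoing CPU then it must contain at least one online CPU. The
	 * outgoing CPU has already been removed from the online mask.
	 */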
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
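		/*
		 * If this happens then there was a missed IRQ fixup at
		 * some point. Warn about it and enforce the fixup.
		 */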
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;
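
	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */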
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}
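
	/*
	 * No move required, if:
	 * - the interrupt is per cpu,
	 * - the interrupt is not started, or
	 * - the affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */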
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
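		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */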
		irq_fixup_move_pending(desc, false);
		return false;
	}
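
	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, the vectors need to be
	 * cleaned up here; that cannot wait until this interrupt actually
	 * triggers again on this CPU.
	 */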
	irq_force_complete_move(desc);
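
	/*
	 * If there is a setaffinity pending, try to reuse the pending
	 * mask so the last change of the affinity does not get lost. If
	 * there is no move pending, or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */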
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);
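
	/* Mask the chip for interrupts which cannot move in process context */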
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
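		/*
		 * If the interrupt is managed, shut it down and leave
		 * the affinity untouched.
		 */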
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
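
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */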
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
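
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as chained interrupts have no action but
 * still need to be migrated.
 */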
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}
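
	/*
	 * If the interrupt can only be directed to a single target CPU
	 * then it is already assigned to a CPU in the affinity mask. No
	 * point in trying to move it around.
	 */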
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
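
/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */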
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}