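// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * All Rights Reserved
 */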
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"
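
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */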
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
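	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity, but the architecture
	 * has enabled the config switch. Use the general affinity mask
	 * instead.
	 */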
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);
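
	/*
	 * Sanity check. If the mask is not empty when excluding the
	 * outgoing CPU then it must contain at least one online CPU. The
	 * outgoing CPU has been removed from the online mask already.
	 */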
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
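		/*
		 * If this happens then there was a missed IRQ fixup at
		 * some point. Warn about it and enforce fixup.
		 */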
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;
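
	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */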
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}
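
	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */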
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
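		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */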
		irq_fixup_move_pending(desc, false);
		return false;
	}
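
	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard interrupt context, then the vectors
	 * need to be cleaned up. It cannot wait until this interrupt
	 * actually happens and this CPU was involved.
	 */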
	irq_force_complete_move(desc);
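
	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */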
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);
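
	/* Mask the chip for interrupts which cannot move in process context */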
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
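		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */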
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
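
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the interrupt to the
	 * outgoing CPU.
	 */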
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
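
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */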
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}
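
	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */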
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}
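
/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */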
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}