1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/device.h>
15#include <linux/interrupt.h>
16#include <linux/irq.h>
17#include <linux/slab.h>
18#include <linux/pm_runtime.h>
19#include <linux/pm_wakeirq.h>
20
21#include "power.h"
22
23
24
25
26
27
28
29
30
31
32static int dev_pm_attach_wake_irq(struct device *dev, int irq,
33 struct wake_irq *wirq)
34{
35 unsigned long flags;
36
37 if (!dev || !wirq)
38 return -EINVAL;
39
40 spin_lock_irqsave(&dev->power.lock, flags);
41 if (dev_WARN_ONCE(dev, dev->power.wakeirq,
42 "wake irq already initialized\n")) {
43 spin_unlock_irqrestore(&dev->power.lock, flags);
44 return -EEXIST;
45 }
46
47 dev->power.wakeirq = wirq;
48 device_wakeup_attach_irq(dev, wirq);
49
50 spin_unlock_irqrestore(&dev->power.lock, flags);
51 return 0;
52}
53
54
55
56
57
58
59
60
61
62
63
64int dev_pm_set_wake_irq(struct device *dev, int irq)
65{
66 struct wake_irq *wirq;
67 int err;
68
69 if (irq < 0)
70 return -EINVAL;
71
72 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
73 if (!wirq)
74 return -ENOMEM;
75
76 wirq->dev = dev;
77 wirq->irq = irq;
78
79 err = dev_pm_attach_wake_irq(dev, irq, wirq);
80 if (err)
81 kfree(wirq);
82
83 return err;
84}
85EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
86
87
88
89
90
91
92
93
94
95
96
97
/*
 * dev_pm_clear_wake_irq - Detach and free a device wake IRQ
 * @dev: device whose wake IRQ is being removed
 *
 * Reverses dev_pm_set_wake_irq() or dev_pm_set_dedicated_wake_irq().
 * The detach from the wakeup framework and the clearing of
 * dev->power.wakeirq happen under dev->power.lock; the free_irq() and
 * kfree() calls must happen after the lock is dropped (free_irq() can
 * sleep waiting for the handler to finish).  Safe to call when no wake
 * IRQ is set.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	/* NOTE(review): wakeirq is read before taking power.lock; callers are
	 * presumably serialized against set/clear racing — confirm. */
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Only dedicated wake IRQs requested their own handler to free. */
	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	/* name is NULL for shared wake IRQs; kfree(NULL) is a no-op. */
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
139{
140 struct wake_irq *wirq = _wirq;
141 int res;
142
143
144 if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
145 pm_wakeup_event(wirq->dev, 0);
146
147 return IRQ_HANDLED;
148 }
149
150
151 res = pm_runtime_resume(wirq->dev);
152 if (res < 0)
153 dev_warn(wirq->dev,
154 "wake IRQ with no resume: %i\n", res);
155
156 return IRQ_HANDLED;
157}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
/*
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake IRQ for a device
 * @dev: device owning the wake IRQ
 * @irq: interrupt number dedicated to waking the device
 *
 * Allocates a wake_irq descriptor, requests @irq with a threaded handler
 * (handle_threaded_wake_irq) and attaches it to the device.  The IRQ
 * status flags must be set *before* request_threaded_irq() so the line
 * is never enabled prematurely.  On success the descriptor is owned by
 * the device and freed by dev_pm_clear_wake_irq(); all error paths
 * unwind in reverse acquisition order.
 *
 * Returns 0 on success, -EINVAL for a negative @irq, -ENOMEM on
 * allocation failure, or the error from request/attach.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	/* Name shown in /proc/interrupts, e.g. "<dev>:wakeup". */
	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;
	/* Leave the IRQ disabled after request; runtime PM enables it. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	/* Mask at the chip immediately on disable, not lazily, so the
	 * line cannot fire between disable and suspend. */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/* IRQF_ONESHOT: keep the line masked until the threaded handler
	 * (which may sleep in pm_runtime_resume) has finished. */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT, wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		goto err_free_irq;

	/* Mark dedicated only after attach succeeds, so clear() knows to
	 * call free_irq(). */
	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
228
229
230
231
232
233
234
235
236
237
238
239
240
241void dev_pm_enable_wake_irq(struct device *dev)
242{
243 struct wake_irq *wirq = dev->power.wakeirq;
244
245 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
246 enable_irq(wirq->irq);
247}
248EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
249
250
251
252
253
254
255
256
257
258void dev_pm_disable_wake_irq(struct device *dev)
259{
260 struct wake_irq *wirq = dev->power.wakeirq;
261
262 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
263 disable_irq_nosync(wirq->irq);
264}
265EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281void dev_pm_enable_wake_irq_check(struct device *dev,
282 bool can_change_status)
283{
284 struct wake_irq *wirq = dev->power.wakeirq;
285
286 if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
287 return;
288
289 if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
290 goto enable;
291 } else if (can_change_status) {
292 wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
293 goto enable;
294 }
295
296 return;
297
298enable:
299 enable_irq(wirq->irq);
300}
301
302
303
304
305
306
307
308
309void dev_pm_disable_wake_irq_check(struct device *dev)
310{
311 struct wake_irq *wirq = dev->power.wakeirq;
312
313 if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
314 return;
315
316 if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
317 disable_irq_nosync(wirq->irq);
318}
319
320
321
322
323
324
325
326
327void dev_pm_arm_wake_irq(struct wake_irq *wirq)
328{
329 if (!wirq)
330 return;
331
332 if (device_may_wakeup(wirq->dev)) {
333 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
334 !pm_runtime_status_suspended(wirq->dev))
335 enable_irq(wirq->irq);
336
337 enable_irq_wake(wirq->irq);
338 }
339}
340
341
342
343
344
345
346
347
348void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
349{
350 if (!wirq)
351 return;
352
353 if (device_may_wakeup(wirq->dev)) {
354 disable_irq_wake(wirq->irq);
355
356 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
357 !pm_runtime_status_suspended(wirq->dev))
358 disable_irq_nosync(wirq->irq);
359 }
360}
361