1
2
3#include <linux/device.h>
4#include <linux/interrupt.h>
5#include <linux/irq.h>
6#include <linux/slab.h>
7#include <linux/pm_runtime.h>
8#include <linux/pm_wakeirq.h>
9
10#include "power.h"
11
12
13
14
15
16
17
18
19static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
20{
21 unsigned long flags;
22
23 if (!dev || !wirq)
24 return -EINVAL;
25
26 spin_lock_irqsave(&dev->power.lock, flags);
27 if (dev_WARN_ONCE(dev, dev->power.wakeirq,
28 "wake irq already initialized\n")) {
29 spin_unlock_irqrestore(&dev->power.lock, flags);
30 return -EEXIST;
31 }
32
33 dev->power.wakeirq = wirq;
34 device_wakeup_attach_irq(dev, wirq);
35
36 spin_unlock_irqrestore(&dev->power.lock, flags);
37 return 0;
38}
39
40
41
42
43
44
45
46
47
48
49
50int dev_pm_set_wake_irq(struct device *dev, int irq)
51{
52 struct wake_irq *wirq;
53 int err;
54
55 if (irq < 0)
56 return -EINVAL;
57
58 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
59 if (!wirq)
60 return -ENOMEM;
61
62 wirq->dev = dev;
63 wirq->irq = irq;
64
65 err = dev_pm_attach_wake_irq(dev, wirq);
66 if (err)
67 kfree(wirq);
68
69 return err;
70}
71EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
72
73
74
75
76
77
78
79
80
81
82
83
84void dev_pm_clear_wake_irq(struct device *dev)
85{
86 struct wake_irq *wirq = dev->power.wakeirq;
87 unsigned long flags;
88
89 if (!wirq)
90 return;
91
92 spin_lock_irqsave(&dev->power.lock, flags);
93 device_wakeup_detach_irq(dev);
94 dev->power.wakeirq = NULL;
95 spin_unlock_irqrestore(&dev->power.lock, flags);
96
97 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
98 free_irq(wirq->irq, wirq);
99 wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
100 }
101 kfree(wirq->name);
102 kfree(wirq);
103}
104EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
125{
126 struct wake_irq *wirq = _wirq;
127 int res;
128
129
130 if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
131 pm_wakeup_event(wirq->dev, 0);
132
133 return IRQ_HANDLED;
134 }
135
136
137 res = pm_runtime_resume(wirq->dev);
138 if (res < 0)
139 dev_warn(wirq->dev,
140 "wake IRQ with no resume: %i\n", res);
141
142 return IRQ_HANDLED;
143}
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt. The interrupt is requested with IRQF_NO_AUTOEN, so it
 * starts disabled; it is managed for the device by the bus code or
 * the device driver using dev_pm_enable_wake_irq() and
 * dev_pm_disable_wake_irq() functions.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM or request_threaded_irq()
 * errors on failure; all allocations are released on the error paths.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * before acquiring the interrupt, so a threaded handler is
	 * used and the IRQ stays disabled until explicitly enabled.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
214
215
216
217
218
219
220
221
222
223
224
225
226
227void dev_pm_enable_wake_irq(struct device *dev)
228{
229 struct wake_irq *wirq = dev->power.wakeirq;
230
231 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
232 enable_irq(wirq->irq);
233}
234EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
235
236
237
238
239
240
241
242
243
244void dev_pm_disable_wake_irq(struct device *dev)
245{
246 struct wake_irq *wirq = dev->power.wakeirq;
247
248 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
249 disable_irq_nosync(wirq->irq);
250}
251EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267void dev_pm_enable_wake_irq_check(struct device *dev,
268 bool can_change_status)
269{
270 struct wake_irq *wirq = dev->power.wakeirq;
271
272 if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
273 return;
274
275 if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
276 goto enable;
277 } else if (can_change_status) {
278 wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
279 goto enable;
280 }
281
282 return;
283
284enable:
285 enable_irq(wirq->irq);
286}
287
288
289
290
291
292
293
294
295void dev_pm_disable_wake_irq_check(struct device *dev)
296{
297 struct wake_irq *wirq = dev->power.wakeirq;
298
299 if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
300 return;
301
302 if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
303 disable_irq_nosync(wirq->irq);
304}
305
306
307
308
309
310
311
312
313void dev_pm_arm_wake_irq(struct wake_irq *wirq)
314{
315 if (!wirq)
316 return;
317
318 if (device_may_wakeup(wirq->dev)) {
319 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
320 !pm_runtime_status_suspended(wirq->dev))
321 enable_irq(wirq->irq);
322
323 enable_irq_wake(wirq->irq);
324 }
325}
326
327
328
329
330
331
332
333
334void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
335{
336 if (!wirq)
337 return;
338
339 if (device_may_wakeup(wirq->dev)) {
340 disable_irq_wake(wirq->irq);
341
342 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
343 !pm_runtime_status_suspended(wirq->dev))
344 disable_irq_nosync(wirq->irq);
345 }
346}
347