1
2
3
4
5
6
7
8
9
10#include <linux/device.h>
11#include <linux/err.h>
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <linux/reboot.h>
15#include <linux/regmap.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/regulator/driver.h>
19
20#include "internal.h"
21
22#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
23
/*
 * Bookkeeping for one IRQ-based regulator notification helper instance.
 * Allocated by regulator_irq_helper() and freed via devm.
 */
struct regulator_irq {
	struct regulator_irq_data rdata;	/* per-regulator error states passed to driver callbacks */
	struct regulator_irq_desc desc;		/* copy of the driver-supplied IRQ description */
	int irq;				/* the requested IRQ number */
	int retry_cnt;				/* consecutive failed status reads (vs desc.fatal_cnt) */
	struct delayed_work isr_work;		/* re-check work run while the IRQ is kept disabled */
};
31
32
33
34
/*
 * Set error flag(s) in the regulator's cached error state under the
 * rdev error lock. Consumers reading cached errors see the new flags.
 */
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err |= err;
	spin_unlock(&rdev->err_lock);
}
41
/*
 * Clear error flag(s) from the regulator's cached error state under the
 * rdev error lock.
 */
static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err &= ~err;
	spin_unlock(&rdev->err_lock);
}
48
/*
 * Delayed-work half of the notification IRQ handling. It runs while the
 * IRQ itself is kept disabled (desc->irq_off_ms) and either re-enables
 * the IRQ once the error condition has cleared, reschedules itself to
 * check again, or escalates to hw_protection_shutdown() when the retry
 * limit (desc->fatal_cnt) is exhausted.
 */
static void regulator_notifier_isr_work(struct work_struct *work)
{
	struct regulator_irq *h;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	int ret = 0;
	int tmo, i;
	int num_rdevs;

	h = container_of(work, struct regulator_irq,
			    isr_work.work);
	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

reread:
	/* Retry limit exceeded: try last-resort IC recovery or shut down. */
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		if (!d->die)
			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		ret = d->die(rid);
		/*
		 * If the 'last resort' die() recovery failed there is
		 * nothing left to do but protect the hardware by shutting
		 * the system down.
		 */
		if (ret)
			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);

		/*
		 * die() succeeded - assume the IC was recovered, re-enable
		 * the IRQ and resume normal operation. Note: the retry
		 * counter is intentionally not reset here; it only resets
		 * after a clean renable()/map_event() pass.
		 */
		goto enable_out;
	}
	if (d->renable) {
		ret = d->renable(rid);

		if (ret == REGULATOR_FAILED_RETRY) {
			/* The IC status read failed again - count and retry */
			h->retry_cnt++;
			if (!d->reread_ms)
				goto reread;

			tmo = d->reread_ms;
			goto reschedule;
		}

		if (ret) {
			/*
			 * Some error is still active. Drop only the cached
			 * flags for errors renable() no longer reports
			 * (possible errors minus the still-set ones), then
			 * check again after irq_off_ms with the IRQ still
			 * disabled.
			 */
			for (i = 0; i < num_rdevs; i++) {
				struct regulator_err_state *stat;
				struct regulator_dev *rdev;

				stat = &rid->states[i];
				rdev = stat->rdev;
				rdev_clear_err(rdev, (~stat->errors) &
						      stat->possible_errs);
			}
			h->retry_cnt++;

			tmo = d->irq_off_ms;
			goto reschedule;
		}
	}

	/*
	 * Either there is no renable() check or it reported all errors
	 * gone: clear every possibly-cached error flag for the regulators
	 * this IRQ serves.
	 */
	for (i = 0; i < num_rdevs; i++) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;
		rdev_clear_err(rdev, stat->possible_errs);
	}

	/* Clean pass - reset the consecutive-failure counter. */
	h->retry_cnt = 0;

enable_out:
	enable_irq(h->irq);

	return;

reschedule:
	/* Re-run this work after tmo ms, on the high-prio wq if requested. */
	if (!d->high_prio)
		mod_delayed_work(system_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
	else
		mod_delayed_work(system_highpri_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
}
155
/*
 * Threaded IRQ handler (requested with IRQF_ONESHOT). Asks the driver's
 * map_event() to decode the event, sends regulator notifications and
 * caches error flags for the affected regulators. If irq_off_ms is set
 * the IRQ is disabled and the delayed work takes over polling until the
 * error clears.
 */
static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
	struct regulator_irq *h = data;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	unsigned long rdev_map = 0;
	int num_rdevs;
	int ret, i;

	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

	/*
	 * Count a (potential) failed read up-front when retries are
	 * limited; the counter is zeroed again on a successful map_event().
	 */
	if (d->fatal_cnt)
		h->retry_cnt++;

	/*
	 * Let the driver decode the IRQ: it fills rid->states[] with the
	 * active errors/notifications and sets a bit in rdev_map for every
	 * regulator that is affected by this event.
	 */
	ret = d->map_event(irq, rid, &rdev_map);

	/*
	 * REGULATOR_FAILED_RETRY means the IC status could not be read;
	 * handle retry accounting (and possible shutdown) in fail_out.
	 */
	if (unlikely(ret == REGULATOR_FAILED_RETRY))
		goto fail_out;

	h->retry_cnt = 0;

	/* Any other error, or no regulator mapped: not our event. */
	if (ret || !rdev_map)
		return IRQ_NONE;

	/*
	 * skip_off: some ICs flag spurious errors while regulators are
	 * disabled; only proceed if at least one flagged regulator is
	 * actually enabled.
	 */
	if (d->skip_off) {
		for_each_set_bit(i, &rdev_map, num_rdevs) {
			struct regulator_dev *rdev;
			const struct regulator_ops *ops;

			rdev = rid->states[i].rdev;
			ops = rdev->desc->ops;

			/*
			 * NOTE(review): dereferences ops->is_enabled
			 * unconditionally - assumes drivers setting
			 * skip_off provide it; confirm at registration.
			 */
			if (ops->is_enabled(rdev))
				break;
		}
		/* Loop completed without break => all flagged rdevs are off. */
		if (i == num_rdevs)
			return IRQ_NONE;
	}

	/* Leave the IRQ disabled; the delayed work re-enables it later. */
	if (d->irq_off_ms)
		disable_irq_nosync(irq);

	/*
	 * Notify consumers and cache the error flags for each affected
	 * regulator so regulator_get_error_flags() can report them.
	 */
	for_each_set_bit(i, &rdev_map, num_rdevs) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;

		rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
			 stat->notifs);

		regulator_notifier_call_chain(rdev, stat->notifs, NULL);
		rdev_flag_err(rdev, stat->errors);
	}

	if (d->irq_off_ms) {
		if (!d->high_prio)
			schedule_delayed_work(&h->isr_work,
					      msecs_to_jiffies(d->irq_off_ms));
		else
			mod_delayed_work(system_highpri_wq,
					 &h->isr_work,
					 msecs_to_jiffies(d->irq_off_ms));
	}

	return IRQ_HANDLED;

fail_out:
	/* Status reads kept failing: attempt IC recovery or shut down. */
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {

		if (!d->die) {
			hw_protection_shutdown("Regulator failure. Retry count exceeded",
					       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		} else {
			ret = d->die(rid);
			/* If die() failed shut down as a last protection. */
			if (ret)
				hw_protection_shutdown("Regulator failure. Recovery failed",
						       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		}
	}

	return IRQ_NONE;
}
279
280static int init_rdev_state(struct device *dev, struct regulator_irq *h,
281 struct regulator_dev **rdev, int common_err,
282 int *rdev_err, int rdev_amount)
283{
284 int i;
285
286 h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
287 rdev_amount, GFP_KERNEL);
288 if (!h->rdata.states)
289 return -ENOMEM;
290
291 h->rdata.num_states = rdev_amount;
292 h->rdata.data = h->desc.data;
293
294 for (i = 0; i < rdev_amount; i++) {
295 h->rdata.states[i].possible_errs = common_err;
296 if (rdev_err)
297 h->rdata.states[i].possible_errs |= *rdev_err++;
298 h->rdata.states[i].rdev = *rdev++;
299 }
300
301 return 0;
302}
303
304static void init_rdev_errors(struct regulator_irq *h)
305{
306 int i;
307
308 for (i = 0; i < h->rdata.num_states; i++)
309 if (h->rdata.states[i].possible_errs)
310 h->rdata.states[i].rdev->use_cached_err = true;
311}
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336void *regulator_irq_helper(struct device *dev,
337 const struct regulator_irq_desc *d, int irq,
338 int irq_flags, int common_errs, int *per_rdev_errs,
339 struct regulator_dev **rdev, int rdev_amount)
340{
341 struct regulator_irq *h;
342 int ret;
343
344 if (!rdev_amount || !d || !d->map_event || !d->name)
345 return ERR_PTR(-EINVAL);
346
347 h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
348 if (!h)
349 return ERR_PTR(-ENOMEM);
350
351 h->irq = irq;
352 h->desc = *d;
353
354 ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
355 rdev_amount);
356 if (ret)
357 return ERR_PTR(ret);
358
359 init_rdev_errors(h);
360
361 if (h->desc.irq_off_ms)
362 INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);
363
364 ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
365 IRQF_ONESHOT | irq_flags, h->desc.name, h);
366 if (ret) {
367 dev_err(dev, "Failed to request IRQ %d\n", irq);
368
369 return ERR_PTR(ret);
370 }
371
372 return h;
373}
374EXPORT_SYMBOL_GPL(regulator_irq_helper);
375
376
377
378
379
380
381
382
383
384
385void regulator_irq_helper_cancel(void **handle)
386{
387 if (handle && *handle) {
388 struct regulator_irq *h = *handle;
389
390 free_irq(h->irq, h);
391 if (h->desc.irq_off_ms)
392 cancel_delayed_work_sync(&h->isr_work);
393
394 h = NULL;
395 }
396}
397EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
398