1
2
3
4
5
6
7
8
9#include <linux/kernel.h>
10#include <linux/device.h>
11#include <linux/io.h>
12#include <linux/pm.h>
13#include <linux/pm_clock.h>
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/pm_domain.h>
19#include <linux/pm_runtime.h>
20
21#ifdef CONFIG_PM_CLK
22
/*
 * Lifecycle states of a PM clock entry.
 *
 * The numeric ordering is load-bearing: callers compare with
 * '< PCE_STATUS_ERROR' and '>= PCE_STATUS_ACQUIRED', so new states must
 * keep ERROR as the largest value.
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* entry created, clock not yet acquired */
	PCE_STATUS_ACQUIRED,	/* clk_get() + clk_prepare() done */
	PCE_STATUS_ENABLED,	/* clk_enable() succeeded */
	PCE_STATUS_ERROR,	/* clock lookup failed; entry is inert */
};
29
/*
 * One clock tracked for runtime PM of a device; entries live on the
 * device's pm_subsys_data clock_list, protected by psd->lock.
 */
struct pm_clock_entry {
	struct list_head node;	/* link in psd->clock_list */
	char *con_id;		/* kstrdup()'d connection ID, may be NULL */
	struct clk *clk;	/* clock handle; may hold ERR_PTR after failed lookup */
	enum pce_status status;	/* current lifecycle state, see enum pce_status */
};
36
37
38
39
40
41
42static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
43{
44 int ret;
45
46 if (ce->status < PCE_STATUS_ERROR) {
47 ret = clk_enable(ce->clk);
48 if (!ret)
49 ce->status = PCE_STATUS_ENABLED;
50 else
51 dev_err(dev, "%s: failed to enable clk %p, error %d\n",
52 __func__, ce->clk, ret);
53 }
54}
55
56
57
58
59
60
61static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
62{
63 if (!ce->clk)
64 ce->clk = clk_get(dev, ce->con_id);
65 if (IS_ERR(ce->clk)) {
66 ce->status = PCE_STATUS_ERROR;
67 } else {
68 clk_prepare(ce->clk);
69 ce->status = PCE_STATUS_ACQUIRED;
70 dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
71 ce->clk, ce->con_id);
72 }
73}
74
75static int __pm_clk_add(struct device *dev, const char *con_id,
76 struct clk *clk)
77{
78 struct pm_subsys_data *psd = dev_to_psd(dev);
79 struct pm_clock_entry *ce;
80
81 if (!psd)
82 return -EINVAL;
83
84 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
85 if (!ce)
86 return -ENOMEM;
87
88 if (con_id) {
89 ce->con_id = kstrdup(con_id, GFP_KERNEL);
90 if (!ce->con_id) {
91 dev_err(dev,
92 "Not enough memory for clock connection ID.\n");
93 kfree(ce);
94 return -ENOMEM;
95 }
96 } else {
97 if (IS_ERR(clk)) {
98 kfree(ce);
99 return -ENOENT;
100 }
101 ce->clk = clk;
102 }
103
104 pm_clk_acquire(dev, ce);
105
106 spin_lock_irq(&psd->lock);
107 list_add_tail(&ce->node, &psd->clock_list);
108 spin_unlock_irq(&psd->lock);
109 return 0;
110}
111
112
113
114
115
116
117
118
119
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock (may be NULL).
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev; the clock itself is looked up with
 * clk_get().  Returns 0 on success or a negative errno.
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
124
125
126
127
128
129
130
131
132
133
134
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer to add; must not be an ERR_PTR.
 *
 * Add @clk to the list of clocks used for the power management of @dev.
 * On success, ownership of the clock reference passes to the PM clock
 * list (it is released by pm_clk_remove_clk()/pm_clk_destroy()).
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
139
140
141
142
143
144
145
146
147
148
149
150
151int of_pm_clk_add_clks(struct device *dev)
152{
153 struct clk **clks;
154 unsigned int i, count;
155 int ret;
156
157 if (!dev || !dev->of_node)
158 return -EINVAL;
159
160 count = of_count_phandle_with_args(dev->of_node, "clocks",
161 "#clock-cells");
162 if (count == 0)
163 return -ENODEV;
164
165 clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
166 if (!clks)
167 return -ENOMEM;
168
169 for (i = 0; i < count; i++) {
170 clks[i] = of_clk_get(dev->of_node, i);
171 if (IS_ERR(clks[i])) {
172 ret = PTR_ERR(clks[i]);
173 goto error;
174 }
175
176 ret = pm_clk_add_clk(dev, clks[i]);
177 if (ret) {
178 clk_put(clks[i]);
179 goto error;
180 }
181 }
182
183 kfree(clks);
184
185 return i;
186
187error:
188 while (i--)
189 pm_clk_remove_clk(dev, clks[i]);
190
191 kfree(clks);
192
193 return ret;
194}
195
196
197
198
199
200static void __pm_clk_remove(struct pm_clock_entry *ce)
201{
202 if (!ce)
203 return;
204
205 if (ce->status < PCE_STATUS_ERROR) {
206 if (ce->status == PCE_STATUS_ENABLED)
207 clk_disable(ce->clk);
208
209 if (ce->status >= PCE_STATUS_ACQUIRED) {
210 clk_unprepare(ce->clk);
211 clk_put(ce->clk);
212 }
213 }
214
215 kfree(ce->con_id);
216 kfree(ce);
217}
218
219
220
221
222
223
224
225
226
227void pm_clk_remove(struct device *dev, const char *con_id)
228{
229 struct pm_subsys_data *psd = dev_to_psd(dev);
230 struct pm_clock_entry *ce;
231
232 if (!psd)
233 return;
234
235 spin_lock_irq(&psd->lock);
236
237 list_for_each_entry(ce, &psd->clock_list, node) {
238 if (!con_id && !ce->con_id)
239 goto remove;
240 else if (!con_id || !ce->con_id)
241 continue;
242 else if (!strcmp(con_id, ce->con_id))
243 goto remove;
244 }
245
246 spin_unlock_irq(&psd->lock);
247 return;
248
249 remove:
250 list_del(&ce->node);
251 spin_unlock_irq(&psd->lock);
252
253 __pm_clk_remove(ce);
254}
255
256
257
258
259
260
261
262
263
264void pm_clk_remove_clk(struct device *dev, struct clk *clk)
265{
266 struct pm_subsys_data *psd = dev_to_psd(dev);
267 struct pm_clock_entry *ce;
268
269 if (!psd || !clk)
270 return;
271
272 spin_lock_irq(&psd->lock);
273
274 list_for_each_entry(ce, &psd->clock_list, node) {
275 if (clk == ce->clk)
276 goto remove;
277 }
278
279 spin_unlock_irq(&psd->lock);
280 return;
281
282 remove:
283 list_del(&ce->node);
284 spin_unlock_irq(&psd->lock);
285
286 __pm_clk_remove(ce);
287}
288
289
290
291
292
293
294
295
/**
 * pm_clk_init - Initialize a device's list of power management clocks.
 * @dev: Device to initialize the list of PM clocks for.
 *
 * Initialize the clock list of the device's pm_subsys_data, if present.
 * Safe to call for devices without subsystem data.
 */
void pm_clk_init(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	if (psd)
		INIT_LIST_HEAD(&psd->clock_list);
}
302
303
304
305
306
307
308
309
/**
 * pm_clk_create - Create and initialize a device's PM clock bookkeeping.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate (or take a reference on) the device's pm_subsys_data.
 * Returns the result of dev_pm_get_subsys_data().
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
314
315
316
317
318
319
320
321
322
/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list for.
 *
 * All clock entries are first moved to a private list while holding
 * psd->lock, then released one by one after the lock (and the reference
 * to the subsystem data) has been dropped.  NOTE(review): the two-phase
 * teardown presumably exists because __pm_clk_remove() calls clk
 * operations that may not run under a spinlock — confirm.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	spin_lock_irq(&psd->lock);

	/* Detach every entry so it can be destroyed outside the lock. */
	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);

	spin_unlock_irq(&psd->lock);

	dev_pm_put_subsys_data(dev);

	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
348
349
350
351
352
353int pm_clk_suspend(struct device *dev)
354{
355 struct pm_subsys_data *psd = dev_to_psd(dev);
356 struct pm_clock_entry *ce;
357 unsigned long flags;
358
359 dev_dbg(dev, "%s()\n", __func__);
360
361 if (!psd)
362 return 0;
363
364 spin_lock_irqsave(&psd->lock, flags);
365
366 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
367 if (ce->status < PCE_STATUS_ERROR) {
368 if (ce->status == PCE_STATUS_ENABLED)
369 clk_disable(ce->clk);
370 ce->status = PCE_STATUS_ACQUIRED;
371 }
372 }
373
374 spin_unlock_irqrestore(&psd->lock, flags);
375
376 return 0;
377}
378
379
380
381
382
383int pm_clk_resume(struct device *dev)
384{
385 struct pm_subsys_data *psd = dev_to_psd(dev);
386 struct pm_clock_entry *ce;
387 unsigned long flags;
388
389 dev_dbg(dev, "%s()\n", __func__);
390
391 if (!psd)
392 return 0;
393
394 spin_lock_irqsave(&psd->lock, flags);
395
396 list_for_each_entry(ce, &psd->clock_list, node)
397 __pm_clk_enable(dev, ce);
398
399 spin_unlock_irqrestore(&psd->lock, flags);
400
401 return 0;
402}
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420static int pm_clk_notify(struct notifier_block *nb,
421 unsigned long action, void *data)
422{
423 struct pm_clk_notifier_block *clknb;
424 struct device *dev = data;
425 char **con_id;
426 int error;
427
428 dev_dbg(dev, "%s() %ld\n", __func__, action);
429
430 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
431
432 switch (action) {
433 case BUS_NOTIFY_ADD_DEVICE:
434 if (dev->pm_domain)
435 break;
436
437 error = pm_clk_create(dev);
438 if (error)
439 break;
440
441 dev_pm_domain_set(dev, clknb->pm_domain);
442 if (clknb->con_ids[0]) {
443 for (con_id = clknb->con_ids; *con_id; con_id++)
444 pm_clk_add(dev, *con_id);
445 } else {
446 pm_clk_add(dev, NULL);
447 }
448
449 break;
450 case BUS_NOTIFY_DEL_DEVICE:
451 if (dev->pm_domain != clknb->pm_domain)
452 break;
453
454 dev_pm_domain_set(dev, NULL);
455 pm_clk_destroy(dev);
456 break;
457 }
458
459 return 0;
460}
461
/*
 * pm_clk_runtime_suspend - Generic runtime-suspend with PM clock gating.
 * @dev: Device to suspend.
 *
 * Run the generic runtime-suspend callback first, then gate the
 * device's PM clocks.  If clock suspend fails, the device is resumed
 * again so it is left in a consistent state.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error) {
		dev_err(dev, "failed to suspend device\n");
		return error;
	}

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	/* Undo the device suspend so we don't leave it half-suspended. */
	dev_err(dev, "failed to suspend clock\n");
	pm_generic_runtime_resume(dev);
	return error;
}
483
/*
 * pm_clk_runtime_resume - Generic runtime-resume with PM clock ungating.
 * @dev: Device to resume.
 *
 * Ungate the device's PM clocks first, then run the generic
 * runtime-resume callback.  Clocks must be running before the device
 * callback is invoked.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_clk_resume(dev);
	if (error) {
		dev_err(dev, "failed to resume clock\n");
		return error;
	}

	return pm_generic_runtime_resume(dev);
}
498
499#else
500
501
502
503
504
505
/*
 * enable_clock - Force a device clock on (CONFIG_PM_CLK disabled path).
 * @dev: Device whose clock should be enabled.
 * @con_id: Connection ID of the clock (may be NULL).
 *
 * Without runtime PM the clock is simply prepared, enabled and left
 * running for the device's lifetime; lookup failures are ignored.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_prepare_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
517
518
519
520
521
522
/*
 * disable_clock - Force a device clock off (CONFIG_PM_CLK disabled path).
 * @dev: Device whose clock should be disabled.
 * @con_id: Connection ID of the clock (may be NULL).
 *
 * Counterpart of enable_clock(): disables and unprepares the clock;
 * lookup failures are ignored.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_disable_unprepare(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
534
535
536
537
538
539
540
541
542
543
544
545
546static int pm_clk_notify(struct notifier_block *nb,
547 unsigned long action, void *data)
548{
549 struct pm_clk_notifier_block *clknb;
550 struct device *dev = data;
551 char **con_id;
552
553 dev_dbg(dev, "%s() %ld\n", __func__, action);
554
555 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
556
557 switch (action) {
558 case BUS_NOTIFY_BIND_DRIVER:
559 if (clknb->con_ids[0]) {
560 for (con_id = clknb->con_ids; *con_id; con_id++)
561 enable_clock(dev, *con_id);
562 } else {
563 enable_clock(dev, NULL);
564 }
565 break;
566 case BUS_NOTIFY_DRIVER_NOT_BOUND:
567 case BUS_NOTIFY_UNBOUND_DRIVER:
568 if (clknb->con_ids[0]) {
569 for (con_id = clknb->con_ids; *con_id; con_id++)
570 disable_clock(dev, *con_id);
571 } else {
572 disable_clock(dev, NULL);
573 }
574 break;
575 }
576
577 return 0;
578}
579
580#endif
581
582
583
584
585
586
587
588
589
590
591
/**
 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be initialized; its
 * notifier_call is set here.  @clknb must persist for as long as the
 * notifier stays registered.
 */
void pm_clk_add_notifier(struct bus_type *bus,
				 struct pm_clk_notifier_block *clknb)
{
	if (!bus || !clknb)
		return;

	clknb->nb.notifier_call = pm_clk_notify;
	bus_register_notifier(bus, &clknb->nb);
}
601