#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/pm_qos.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/clkdev.h>

#include "clock.h"

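/*
 * clocks_mutex serialises updates to the global clock list; clocks_lock
 * protects the per-clock enable counts touched by clk_enable() and
 * clk_disable().
 */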
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);

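/*
 * Standard clock functions defined in include/linux/clk.h
 */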
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clocks_lock, flags);
	/* Enable the hardware only on the first user (0 -> 1 transition). */
	clk->count++;
	if (clk->count == 1)
		clk->ops->enable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clocks_lock, flags);
	/* Unbalanced clk_disable() calls indicate a driver bug. */
	BUG_ON(clk->count == 0);
	clk->count--;
	/* Turn the hardware off once the last user is gone. */
	if (clk->count == 0)
		clk->ops->disable(clk->id);
	spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

int clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return clk->ops->reset(clk->remote_id, action);
}
EXPORT_SYMBOL(clk_reset);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->ops->get_rate(clk->id);
}
EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	/*
	 * Clocks flagged CLKFLAG_MAX and/or CLKFLAG_MIN take the requested
	 * rate as a max/min constraint; only unflagged clocks get a direct
	 * set_rate() call.
	 */
	if (clk->flags & CLKFLAG_MAX) {
		ret = clk->ops->set_max_rate(clk->id, rate);
		if (ret)
			return ret;
	}
	if (clk->flags & CLKFLAG_MIN) {
		ret = clk->ops->set_min_rate(clk->id, rate);
		if (ret)
			return ret;
	}

	if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN)
		return ret;

	return clk->ops->set_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->round_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_min_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_min_rate);

int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return clk->ops->set_max_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_max_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	/* Reparenting is not supported through this interface. */
	return -ENOSYS;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	/* Parent lookup is not supported through this interface. */
	return ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_flags(struct clk *clk, unsigned long flags)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;
	return clk->ops->set_flags(clk->id, flags);
}
EXPORT_SYMBOL(clk_set_flags);

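/* Handle to the EBI1 bus clock, looked up once in msm_clock_init(). */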
static struct clk *ebi1_clk;

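/*
 * Register the platform clock table with clkdev and remember each clock on
 * the local list so clock_late_init() can walk it later.
 */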
void __init msm_clock_init(struct clk_lookup *clock_tbl, unsigned num_clocks)
{
	unsigned n;

	mutex_lock(&clocks_mutex);
	for (n = 0; n < num_clocks; n++) {
		clkdev_add(&clock_tbl[n]);
		list_add_tail(&clock_tbl[n].clk->list, &clocks);
	}
	mutex_unlock(&clocks_mutex);

	ebi1_clk = clk_get(NULL, "ebi1_clk");
	/* clk_get() reports failure with an ERR_PTR, not NULL. */
	BUG_ON(IS_ERR_OR_NULL(ebi1_clk));
}
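/*
 * Late in boot, turn off every clock marked CLKFLAG_AUTO_OFF that no driver
 * has claimed with clk_enable() (for example, clocks left on by the
 * bootloader).
 */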
static int __init clock_late_init(void)
{
	unsigned long flags;
	struct clk *clk;
	unsigned count = 0;

	clock_debug_init();
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, list) {
		clock_debug_add(clk);
		if (clk->flags & CLKFLAG_AUTO_OFF) {
			spin_lock_irqsave(&clocks_lock, flags);
			if (!clk->count) {
				count++;
				clk->ops->auto_off(clk->id);
			}
			spin_unlock_irqrestore(&clocks_lock, flags);
		}
	}
	mutex_unlock(&clocks_mutex);
	pr_info("clock_late_init() disabled %u unused clocks\n", count);
	return 0;
}

late_initcall(clock_late_init);