/*
 * V4L2 clock service
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

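/* All clocks registered via v4l2_clk_register(), protected by clk_lock */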
static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

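/* Look up a registered clock by its device ID; the caller must hold clk_lock */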
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

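/*
 * Get a clock for a device: try the common clock framework first, then fall
 * back to clocks registered with v4l2_clk_register(), matching the device
 * name and, if that fails, the device's OF node name.
 */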
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;
	struct clk *ccf_clk = clk_get(dev, id);
	char clk_name[V4L2_CLK_NAME_SIZE];

	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	if (!IS_ERR_OR_NULL(ccf_clk)) {
		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
		if (!clk) {
			clk_put(ccf_clk);
			return ERR_PTR(-ENOMEM);
		}
		clk->clk = ccf_clk;

		return clk;
	}

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

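	/* if dev_id has not been found, try with the OF name */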
	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
		v4l2_clk_name_of(clk_name, sizeof(clk_name),
				 of_node_full_name(dev->of_node));
		clk = v4l2_clk_find(clk_name);
	}

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

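/*
 * Release a clock obtained with v4l2_clk_get(): CCF-backed clocks are freed
 * here, registered clocks only drop their use count.
 */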
void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	if (clk->clk) {
		clk_put(clk->clk);
		kfree(clk);
		return;
	}

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

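/*
 * Pin the module providing a registered clock so that it cannot be unloaded
 * while the clock is in use; fails if the clock is no longer registered.
 */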
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

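/*
 * Enable the clock: the provider's .enable() callback is only invoked on the
 * 0 -> 1 transition of the enable count.
 */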
int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_prepare_enable(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

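/*
 * Don't disable a clock that is not enabled: the provider module is only
 * pinned while the clock is enabled, so it could already have been unloaded.
 */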
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	if (clk->clk)
		return clk_disable_unprepare(clk->clk);

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

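/*
 * Query the clock rate, either from the CCF or from the provider's
 * .get_rate() callback.
 */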
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_get_rate(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

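/*
 * Set the clock rate: CCF clocks are rounded with clk_round_rate() first,
 * registered clocks go through the provider's .set_rate() callback.
 */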
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret;

	if (clk->clk) {
		long r = clk_round_rate(clk->clk, rate);
		if (r < 0)
			return r;
		return clk_set_rate(clk->clk, r);
	}

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

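/*
 * Register a clock provider under a unique device ID so that v4l2_clk_get()
 * can hand it out to consumers.
 */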
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

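/*
 * Remove a clock from the registry; refuses to do so while consumers still
 * hold references.
 */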
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

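/* Helpers for registering a simple fixed-rate clock */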
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;
	return priv->rate;
}

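/*
 * Register a fixed-rate clock: only .get_rate() is provided, so enable and
 * disable merely adjust the enable count.
 */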
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);