linux/drivers/media/v4l2-core/v4l2-clk.c
/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

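/* Look up a registered clock by consumer device name; the caller must hold clk_lock */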
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
        struct v4l2_clk *clk;

        list_for_each_entry(clk, &clk_list, list)
                if (!strcmp(dev_id, clk->dev_id))
                        return clk;

        return ERR_PTR(-ENODEV);
}

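/**
 * v4l2_clk_get - look up and obtain a reference to a clock
 * @dev: clock consumer device
 * @id: clock consumer ID
 *
 * Try the common clock framework first; if no CCF clock is found, fall back
 * to the list of registered V4L2 clocks, matching on the consumer's device
 * name.
 *
 * Return: a struct v4l2_clk pointer, or an ERR_PTR() on failure.
 */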
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
        struct v4l2_clk *clk;
        struct clk *ccf_clk = clk_get(dev, id);

        if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
                return ERR_PTR(-EPROBE_DEFER);

        if (!IS_ERR_OR_NULL(ccf_clk)) {
                clk = kzalloc(sizeof(*clk), GFP_KERNEL);
                if (!clk) {
                        clk_put(ccf_clk);
                        return ERR_PTR(-ENOMEM);
                }
                clk->clk = ccf_clk;

                return clk;
        }

        mutex_lock(&clk_lock);
        clk = v4l2_clk_find(dev_name(dev));

        if (!IS_ERR(clk))
                atomic_inc(&clk->use_count);
        mutex_unlock(&clk_lock);

        return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

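/**
 * v4l2_clk_put - release a clock obtained with v4l2_clk_get()
 * @clk: clock to release
 *
 * For CCF-backed clocks the wrapper is freed and the underlying clock is
 * put; for clocks on the V4L2 clock list only the use count is dropped.
 */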
void v4l2_clk_put(struct v4l2_clk *clk)
{
        struct v4l2_clk *tmp;

        if (IS_ERR(clk))
                return;

        if (clk->clk) {
                clk_put(clk->clk);
                kfree(clk);
                return;
        }

        mutex_lock(&clk_lock);

        list_for_each_entry(tmp, &clk_list, list)
                if (tmp == clk)
                        atomic_dec(&clk->use_count);

        mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

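/*
 * Pin the module that provides the clock while the clock is in use. Returns
 * 0 on success, -ENODEV if the clock is no longer registered, or -EFAULT if
 * the module reference could not be taken. v4l2_clk_unlock_driver() drops
 * the reference again.
 */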
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
        struct v4l2_clk *tmp;
        int ret = -ENODEV;

        mutex_lock(&clk_lock);

        list_for_each_entry(tmp, &clk_list, list)
                if (tmp == clk) {
                        ret = !try_module_get(clk->ops->owner);
                        if (ret)
                                ret = -EFAULT;
                        break;
                }

        mutex_unlock(&clk_lock);

        return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
        module_put(clk->ops->owner);
}

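/**
 * v4l2_clk_enable - enable a clock
 * @clk: clock to enable
 *
 * The first enable call invokes the provider's .enable() operation; nested
 * calls only increment the enable count.
 *
 * Return: 0 on success or a negative error code.
 */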
int v4l2_clk_enable(struct v4l2_clk *clk)
{
        int ret;

        if (clk->clk)
                return clk_prepare_enable(clk->clk);

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);

        if (++clk->enable == 1 && clk->ops->enable) {
                ret = clk->ops->enable(clk);
                if (ret < 0)
                        clk->enable--;
        }

        mutex_unlock(&clk->lock);

        /* Don't leak the driver-module reference if enabling failed */
        if (ret < 0)
                v4l2_clk_unlock_driver(clk);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
        int enable;

        if (clk->clk)
                return clk_disable_unprepare(clk->clk);

        mutex_lock(&clk->lock);

        enable = --clk->enable;
        if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
                 clk->dev_id))
                clk->enable++;
        else if (!enable && clk->ops->disable)
                clk->ops->disable(clk);

        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

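/**
 * v4l2_clk_get_rate - obtain the current clock rate
 * @clk: clock to query
 *
 * Return: the rate in Hz, or a negative error code (e.g. -ENOSYS if the
 * provider has no .get_rate() operation) cast to unsigned long.
 */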
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
        int ret;

        if (clk->clk)
                return clk_get_rate(clk->clk);

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);
        if (!clk->ops->get_rate)
                ret = -ENOSYS;
        else
                ret = clk->ops->get_rate(clk);
        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

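/**
 * v4l2_clk_set_rate - set the clock rate
 * @clk: clock to configure
 * @rate: requested rate in Hz
 *
 * CCF-backed clocks are rounded with clk_round_rate() before being set;
 * otherwise the provider's .set_rate() operation is called.
 *
 * Return: 0 on success or a negative error code.
 */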
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
        int ret;

        if (clk->clk) {
                long r = clk_round_rate(clk->clk, rate);

                if (r < 0)
                        return r;
                return clk_set_rate(clk->clk, r);
        }

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);
        if (!clk->ops->set_rate)
                ret = -ENOSYS;
        else
                ret = clk->ops->set_rate(clk, rate);
        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

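/**
 * v4l2_clk_register - register a V4L2 software clock
 * @ops: clock operations, provided by the clock driver
 * @dev_id: name of the consumer device, as returned by dev_name()
 * @priv: private driver data, stored in clk->priv
 *
 * Return: the new clock, or an ERR_PTR(): -EINVAL for missing arguments,
 * -ENOMEM on allocation failure, -EEXIST if a clock with the same @dev_id
 * is already registered.
 */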
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
                                   const char *dev_id,
                                   void *priv)
{
        struct v4l2_clk *clk;
        int ret;

        if (!ops || !dev_id)
                return ERR_PTR(-EINVAL);

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk)
                return ERR_PTR(-ENOMEM);

        clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
        if (!clk->dev_id) {
                ret = -ENOMEM;
                goto ealloc;
        }
        clk->ops = ops;
        clk->priv = priv;
        atomic_set(&clk->use_count, 0);
        mutex_init(&clk->lock);

        mutex_lock(&clk_lock);
        if (!IS_ERR(v4l2_clk_find(dev_id))) {
                mutex_unlock(&clk_lock);
                ret = -EEXIST;
                goto eexist;
        }
        list_add_tail(&clk->list, &clk_list);
        mutex_unlock(&clk_lock);

        return clk;

eexist:
ealloc:
        kfree(clk->dev_id);
        kfree(clk);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

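/**
 * v4l2_clk_unregister - remove a previously registered V4L2 clock
 * @clk: clock to unregister
 *
 * Refuses (with a warning) to unregister a clock that still has users.
 */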
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
        if (WARN(atomic_read(&clk->use_count),
                 "%s(): Refusing to unregister ref-counted %s clock!\n",
                 __func__, clk->dev_id))
                return;

        mutex_lock(&clk_lock);
        list_del(&clk->list);
        mutex_unlock(&clk_lock);

        kfree(clk->dev_id);
        kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

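/* Simple fixed-rate clock: only implements .get_rate() */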
struct v4l2_clk_fixed {
        unsigned long rate;
        struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
        struct v4l2_clk_fixed *priv = clk->priv;

        return priv->rate;
}

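/**
 * __v4l2_clk_register_fixed - register a fixed-rate V4L2 clock
 * @dev_id: name of the consumer device
 * @rate: fixed rate in Hz
 * @owner: module providing the clock, used for reference counting
 *
 * Return: the new clock, or an ERR_PTR() on failure.
 */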
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
                                unsigned long rate, struct module *owner)
{
        struct v4l2_clk *clk;
        struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->rate = rate;
        priv->ops.get_rate = fixed_get_rate;
        priv->ops.owner = owner;

        clk = v4l2_clk_register(&priv->ops, dev_id, priv);
        if (IS_ERR(clk))
                kfree(priv);

        return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

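/**
 * v4l2_clk_unregister_fixed - unregister a fixed-rate V4L2 clock
 * @clk: clock returned by __v4l2_clk_register_fixed()
 */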
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
        kfree(clk->priv);
        v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);

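/*
 * Example usage (illustrative sketch only, not part of this file): a bridge
 * or host driver registers a fixed clock for its sensor's I2C device, and
 * the sensor driver then consumes it through this API. The device name
 * "1-0030" and the 24 MHz rate below are made-up values.
 *
 *      // bridge/host driver, at probe time:
 *      struct v4l2_clk *mclk =
 *              __v4l2_clk_register_fixed("1-0030", 24000000, THIS_MODULE);
 *      if (IS_ERR(mclk))
 *              return PTR_ERR(mclk);
 *
 *      // sensor subdevice driver (struct i2c_client *client):
 *      struct v4l2_clk *clk = v4l2_clk_get(&client->dev, NULL);
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      v4l2_clk_enable(clk);
 *      dev_info(&client->dev, "mclk: %lu Hz\n", v4l2_clk_get_rate(clk));
 *      v4l2_clk_disable(clk);
 *      v4l2_clk_put(clk);
 *
 *      // bridge/host driver, at remove time:
 *      v4l2_clk_unregister_fixed(mclk);
 */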