linux/drivers/clk/clk-bulk.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 *
 * Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

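/*
 * of_clk_bulk_get() - look up @num_clks clocks by index on @np.  On any
 * failure the clocks acquired so far are released with clk_bulk_put()
 * and the error from of_clk_get() is returned; on success every
 * clks[i].clk entry is valid.
 */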
static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
                                        struct clk_bulk_data *clks)
{
        int ret;
        int i;

        for (i = 0; i < num_clks; i++)
                clks[i].clk = NULL;

        for (i = 0; i < num_clks; i++) {
                clks[i].clk = of_clk_get(np, i);
                if (IS_ERR(clks[i].clk)) {
                        ret = PTR_ERR(clks[i].clk);
                        pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
                               np, i, ret);
                        clks[i].clk = NULL;
                        goto err;
                }
        }

        return 0;

err:
        clk_bulk_put(i, clks);

        return ret;
}

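/*
 * of_clk_bulk_get_all() - allocate and fill a clk_bulk_data table with
 * every clock referenced by @np's "clocks" property.  Returns the
 * number of clocks found, 0 if the node references none, or a negative
 * error code (the table is freed again on error).
 */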
static int __must_check of_clk_bulk_get_all(struct device_node *np,
                                            struct clk_bulk_data **clks)
{
        struct clk_bulk_data *clk_bulk;
        int num_clks;
        int ret;

        num_clks = of_clk_get_parent_count(np);
        if (!num_clks)
                return 0;

        clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
        if (!clk_bulk)
                return -ENOMEM;

        ret = of_clk_bulk_get(np, num_clks, clk_bulk);
        if (ret) {
                kfree(clk_bulk);
                return ret;
        }

        *clks = clk_bulk;

        return num_clks;
}

void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
{
        while (--num_clks >= 0) {
                clk_put(clks[num_clks].clk);
                clks[num_clks].clk = NULL;
        }
}
EXPORT_SYMBOL_GPL(clk_bulk_put);

int __must_check clk_bulk_get(struct device *dev, int num_clks,
                              struct clk_bulk_data *clks)
{
        int ret;
        int i;

        for (i = 0; i < num_clks; i++)
                clks[i].clk = NULL;

        for (i = 0; i < num_clks; i++) {
                clks[i].clk = clk_get(dev, clks[i].id);
                if (IS_ERR(clks[i].clk)) {
                        ret = PTR_ERR(clks[i].clk);
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to get clk '%s': %d\n",
                                        clks[i].id, ret);
                        clks[i].clk = NULL;
                        goto err;
                }
        }

        return 0;

err:
        clk_bulk_put(i, clks);

        return ret;
}
EXPORT_SYMBOL(clk_bulk_get);
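
/*
 * Example (illustrative only, not part of this file): a driver that
 * names its clocks in a static clk_bulk_data table and acquires them
 * in one call.  The "bus"/"core" names and foo_probe() are
 * hypothetical.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *		if (ret)
 *			return ret;	// clk_bulk_get() already put any clocks it got
 *		...
 *		clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 *		return 0;
 *	}
 */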

void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
        if (IS_ERR_OR_NULL(clks))
                return;

        clk_bulk_put(num_clks, clks);

        kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);

int __must_check clk_bulk_get_all(struct device *dev,
                                  struct clk_bulk_data **clks)
{
        struct device_node *np = dev_of_node(dev);

        if (!np)
                return 0;

        return of_clk_bulk_get_all(np, clks);
}
EXPORT_SYMBOL(clk_bulk_get_all);
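
/*
 * Example (illustrative only): grabbing every clock listed in the
 * device's DT node without knowing the names up front.  bar_probe()
 * is hypothetical.
 *
 *	static int bar_probe(struct device *dev)
 *	{
 *		struct clk_bulk_data *clks;
 *		int num_clks;
 *
 *		num_clks = clk_bulk_get_all(dev, &clks);
 *		if (num_clks < 0)
 *			return num_clks;
 *		...
 *		clk_bulk_put_all(num_clks, clks);
 *		return 0;
 *	}
 */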

#ifdef CONFIG_HAVE_CLK_PREPARE

/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
        while (--num_clks >= 0)
                clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);

/**
 * clk_bulk_prepare - prepare a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being prepared
 *
 * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
 * Returns 0 on success, or a negative error code otherwise.
 */
int __must_check clk_bulk_prepare(int num_clks,
                                  const struct clk_bulk_data *clks)
{
        int ret;
        int i;

        for (i = 0; i < num_clks; i++) {
                ret = clk_prepare(clks[i].clk);
                if (ret) {
                        pr_err("Failed to prepare clk '%s': %d\n",
                                clks[i].id, ret);
                        goto err;
                }
        }

        return 0;

err:
        clk_bulk_unprepare(i, clks);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_prepare);

#endif /* CONFIG_HAVE_CLK_PREPARE */

/**
 * clk_bulk_disable - gate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being gated
 *
 * clk_bulk_disable must not sleep, which differentiates it from
 * clk_bulk_unprepare. clk_bulk_disable must be called before
 * clk_bulk_unprepare.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
{
        while (--num_clks >= 0)
                clk_disable(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_disable);

/**
 * clk_bulk_enable - ungate a set of clocks
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being ungated
 *
 * clk_bulk_enable must not sleep, which differentiates it from
 * clk_bulk_prepare. Returns 0 on success, or a negative error code
 * otherwise.
 */
int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
{
        int ret;
        int i;

        for (i = 0; i < num_clks; i++) {
                ret = clk_enable(clks[i].clk);
                if (ret) {
                        pr_err("Failed to enable clk '%s': %d\n",
                                clks[i].id, ret);
                        goto err;
                }
        }

        return 0;

err:
        clk_bulk_disable(i, clks);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_bulk_enable);
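
/*
 * Example (illustrative only): typical ordering of the bulk prepare and
 * enable helpers around a clk_bulk_data table @clks of @num_clks
 * entries already obtained with clk_bulk_get():
 *
 *	ret = clk_bulk_prepare(num_clks, clks);		// may sleep
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_enable(num_clks, clks);		// must not sleep
 *	if (ret) {
 *		clk_bulk_unprepare(num_clks, clks);
 *		return ret;
 *	}
 *	...
 *	clk_bulk_disable(num_clks, clks);		// disable before unprepare
 *	clk_bulk_unprepare(num_clks, clks);
 */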