/*
 * Simple multiplexer clock implementation
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent rate
 * parent - parent is adjustable through clk_set_parent
 */
#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)

static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	int num_parents = __clk_get_num_parents(hw->clk);
	u32 val;

	/*
	 * Read the raw select value out of the register field and map it
	 * back to a parent index, either through the optional translation
	 * table or via the CLK_MUX_INDEX_BIT / CLK_MUX_INDEX_ONE flags.
	 */
	val = clk_readl(mux->reg) >> mux->shift;
	val &= mux->mask;

	if (mux->table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (mux->table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (mux->flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (mux->flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;
	unsigned long flags = 0;

	if (mux->table) {
		index = mux->table[index];
	} else {
		/*
		 * CLK_MUX_INDEX_BIT muxes use one register bit per parent,
		 * so encode the index as the inverse of the ffs()-based
		 * decode in clk_mux_get_parent().
		 */
		if (mux->flags & CLK_MUX_INDEX_BIT)
			index = 1 << index;

		if (mux->flags & CLK_MUX_INDEX_ONE)
			index++;
	}

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		/* upper 16 bits act as a write mask, so no read-modify-write */
		val = mux->mask << (mux->shift + 16);
	} else {
		val = clk_readl(mux->reg);
		val &= ~(mux->mask << mux->shift);
	}
	val |= index << mux->shift;
	clk_writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
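
/**
 * clk_register_mux_table - register a mux clock with a parent-index table
 * @dev: device registering this clock (may be NULL)
 * @name: name of this clock
 * @parent_names: array of parent clock names
 * @num_parents: number of entries in @parent_names
 * @flags: framework-level clock flags
 * @reg: register controlling the mux select field
 * @shift: bit offset of the select field within @reg
 * @mask: mask of the select field, relative to bit 0 (unshifted)
 * @clk_mux_flags: mux-specific flags (CLK_MUX_*)
 * @table: optional array mapping parent index to register value, or NULL
 * @lock: optional spinlock protecting @reg, or NULL
 *
 * Returns the registered clock, or an ERR_PTR() on failure.
 */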
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char **parent_names, u8 num_parents, unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk *clk;
	struct clk_init_data init;
	u8 width = 0;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
	if (!mux) {
		pr_err("%s: could not allocate mux clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	clk = clk_register(dev, &mux->hw);

	if (IS_ERR(clk))
		kfree(mux);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
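
/**
 * clk_register_mux - register a mux clock selected by a contiguous bitfield
 * @dev: device registering this clock (may be NULL)
 * @name: name of this clock
 * @parent_names: array of parent clock names
 * @num_parents: number of entries in @parent_names
 * @flags: framework-level clock flags
 * @reg: register controlling the mux select field
 * @shift: bit offset of the select field within @reg
 * @width: width of the select field in bits
 * @clk_mux_flags: mux-specific flags (CLK_MUX_*)
 * @lock: optional spinlock protecting @reg, or NULL
 *
 * Convenience wrapper around clk_register_mux_table() for the common case
 * where parent index N selects register value N.
 */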
struct clk *clk_register_mux(struct device *dev, const char *name,
		const char **parent_names, u8 num_parents, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	u32 mask = BIT(width) - 1;

	return clk_register_mux_table(dev, name, parent_names, num_parents,
				      flags, reg, shift, mask, clk_mux_flags,
				      NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_register_mux);
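
/*
 * Illustrative usage only: a provider driver might register a two-parent
 * mux whose one-bit select field sits at bit 0 of a control register.
 * Every name, register offset and lock below is an assumption made for
 * this sketch, not something defined in this file.
 *
 *	static DEFINE_SPINLOCK(example_mux_lock);
 *	static const char *example_parents[] = { "osc", "pll" };
 *
 *	clk = clk_register_mux(NULL, "example_mux", example_parents,
 *			       ARRAY_SIZE(example_parents), 0,
 *			       base + 0x10, 0, 1, 0, &example_mux_lock);
 *
 * When the select values are not a simple 0..n-1 range, pass a translation
 * table to clk_register_mux_table() instead, e.g. a u32 table of { 1, 2, 4 }
 * together with a mask of 0x7 for a one-hot three-parent mux.
 */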