// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 NXP
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

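/*
 * Layout of the shared clock root register used by this composite clock:
 * a 3-bit pre-divider at bit 16, a post-divider at bit 0 (3 bits wide for
 * core clocks, 6 bits otherwise), a 3-bit parent selector at bit 24 and a
 * gate bit at bit 28.
 */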
#define PCG_PREDIV_SHIFT	16
#define PCG_PREDIV_WIDTH	3
#define PCG_PREDIV_MAX		8

#define PCG_DIV_SHIFT		0
#define PCG_CORE_DIV_WIDTH	3
#define PCG_DIV_WIDTH		6
#define PCG_DIV_MAX		64

#define PCG_PCS_SHIFT		24
#define PCG_PCS_MASK		0x7

#define PCG_CGC_SHIFT		28

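/*
 * The effective rate is the parent rate divided by the cascaded
 * pre-divider and post-divider fields of the same register.
 */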
static unsigned long imx8m_clk_composite_divider_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long prediv_rate;
	unsigned int prediv_value;
	unsigned int div_value;

	prediv_value = readl(divider->reg) >> divider->shift;
	prediv_value &= clk_div_mask(divider->width);

	prediv_rate = divider_recalc_rate(hw, parent_rate, prediv_value,
					  NULL, divider->flags,
					  divider->width);

	div_value = readl(divider->reg) >> PCG_DIV_SHIFT;
	div_value &= clk_div_mask(PCG_DIV_WIDTH);

	return divider_recalc_rate(hw, prediv_rate, div_value, NULL,
				   divider->flags, PCG_DIV_WIDTH);
}

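/*
 * Exhaustively search all pre-divider/post-divider combinations and pick
 * the one whose output is closest to the requested rate.
 */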
static int imx8m_clk_composite_compute_dividers(unsigned long rate,
						unsigned long parent_rate,
						int *prediv, int *postdiv)
{
	int div1, div2;
	int error = INT_MAX;
	int ret = -EINVAL;

	*prediv = 1;
	*postdiv = 1;

	for (div1 = 1; div1 <= PCG_PREDIV_MAX; div1++) {
		for (div2 = 1; div2 <= PCG_DIV_MAX; div2++) {
			int new_error = ((parent_rate / div1) / div2) - rate;

			if (abs(new_error) < abs(error)) {
				*prediv = div1;
				*postdiv = div2;
				error = new_error;
				ret = 0;
			}
		}
	}
	return ret;
}

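/* Report the rate that the best divider pair found above would produce. */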
static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
						   unsigned long rate,
						   unsigned long *prate)
{
	int prediv_value;
	int div_value;

	imx8m_clk_composite_compute_dividers(rate, *prate,
					     &prediv_value, &div_value);
	rate = DIV_ROUND_UP(*prate, prediv_value);

	return DIV_ROUND_UP(rate, div_value);
}

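/*
 * Program both divider fields in a single read-modify-write under the CCM
 * lock, skipping the register write if nothing would change.
 */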
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long flags;
	int prediv_value;
	int div_value;
	int ret;
	u32 orig, val;

	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
						   &prediv_value, &div_value);
	if (ret)
		return -EINVAL;

	spin_lock_irqsave(divider->lock, flags);

	orig = readl(divider->reg);
	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));

	val |= (u32)(prediv_value - 1) << divider->shift;
	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;

	if (val != orig)
		writel(val, divider->reg);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

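/*
 * For read-only dividers report the currently programmed ratio; otherwise
 * let the generic divider code choose one.
 */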
static int imx8m_divider_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int prediv_value;
	int div_value;

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(divider->reg);
		prediv_value = val >> divider->shift;
		prediv_value &= clk_div_mask(divider->width);
		prediv_value++;

		div_value = val >> PCG_DIV_SHIFT;
		div_value &= clk_div_mask(PCG_DIV_WIDTH);
		div_value++;

		return divider_ro_determine_rate(hw, req, divider->table,
						 PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
						 divider->flags, prediv_value * div_value);
	}

	return divider_determine_rate(hw, req, divider->table,
				      PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
				      divider->flags);
}

static const struct clk_ops imx8m_clk_composite_divider_ops = {
	.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
	.round_rate = imx8m_clk_composite_divider_round_rate,
	.set_rate = imx8m_clk_composite_divider_set_rate,
	.determine_rate = imx8m_divider_determine_rate,
};

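/* Parent lookup is simply delegated to the generic mux implementation. */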
static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

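/*
 * Open-coded variant of the generic mux set_parent that writes the
 * selector twice (see the comment below) under the CCM lock.
 */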
static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	/*
	 * write twice to make sure non-target interface
	 * SEL_A/B point the same clk input.
	 */
	writel(reg, mux->reg);
	writel(reg, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}

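/* Rate requests are forwarded unchanged to the generic mux implementation. */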
static int
imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx8m_clk_composite_mux_ops = {
	.get_parent = imx8m_clk_composite_mux_get_parent,
	.set_parent = imx8m_clk_composite_mux_set_parent,
	.determine_rate = imx8m_clk_composite_mux_determine_rate,
};

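/*
 * Register a composite clock (mux + divider + optional gate) that lives in
 * a single i.MX8M clock root register.  The divider field layout and the
 * mux/divider ops depend on @composite_flags.  Callers normally go through
 * the imx8m_clk_hw_composite*() helper macros from clk.h rather than
 * calling this directly.
 *
 * Illustrative call only (the clock name, selector table, index and
 * register offset below are made up):
 *
 *	hws[MY_CLK_FOO] = __imx8m_clk_hw_composite("foo", foo_sels,
 *						   ARRAY_SIZE(foo_sels),
 *						   base + 0x8000, 0,
 *						   CLK_SET_RATE_NO_REPARENT);
 */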
struct clk_hw *__imx8m_clk_hw_composite(const char *name,
					const char * const *parent_names,
					int num_parents, void __iomem *reg,
					u32 composite_flags,
					unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	const struct clk_ops *divider_ops;
	const struct clk_ops *mux_ops;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto fail;

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = PCG_PCS_SHIFT;
	mux->mask = PCG_PCS_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto fail;

	div_hw = &div->hw;
	div->reg = reg;
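	/*
	 * Core clocks use a single 3-bit divider with the generic divider
	 * ops, while bus and peripheral clocks use the cascaded pre/post
	 * divider ops above.  Peripheral (default) clocks keep the generic
	 * mux ops and are gated across reparenting unless they are
	 * firmware managed.
	 */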
	if (composite_flags & IMX_COMPOSITE_CORE) {
		div->shift = PCG_DIV_SHIFT;
		div->width = PCG_CORE_DIV_WIDTH;
		divider_ops = &clk_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else if (composite_flags & IMX_COMPOSITE_BUS) {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &clk_mux_ops;
		if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
			flags |= CLK_SET_PARENT_GATE;
	}

	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	/* skip registering the gate ops if M4 is enabled */
	if (!mcore_booted) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto fail;

		gate_hw = &gate->hw;
		gate->reg = reg;
		gate->bit_idx = PCG_CGC_SHIFT;
		gate->lock = &imx_ccm_lock;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux_hw, mux_ops, div_hw,
				       divider_ops, gate_hw, &clk_gate_ops, flags);
	if (IS_ERR(hw))
		goto fail;

	return hw;

fail:
	kfree(gate);
	kfree(div);
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(__imx8m_clk_hw_composite);