1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Copyright 2018-2021 NXP |
4 | * Dong Aisheng <aisheng.dong@nxp.com> |
5 | */ |
6 | |
7 | #include <dt-bindings/firmware/imx/rsrc.h> |
8 | #include <linux/arm-smccc.h> |
9 | #include <linux/bsearch.h> |
10 | #include <linux/clk-provider.h> |
11 | #include <linux/err.h> |
12 | #include <linux/of.h> |
13 | #include <linux/firmware/imx/svc/rm.h> |
14 | #include <linux/platform_device.h> |
15 | #include <linux/pm_domain.h> |
16 | #include <linux/pm_runtime.h> |
17 | #include <linux/slab.h> |
18 | #include <xen/xen.h> |
19 | |
20 | #include "clk-scu.h" |
21 | |
22 | #define IMX_SIP_CPUFREQ 0xC2000001 |
23 | #define IMX_SIP_SET_CPUFREQ 0x00 |
24 | |
/* IPC handle used for every SCU clock RPC call */
static struct imx_sc_ipc *ccm_ipc_handle;
/* "fsl,scu-pd" node; used to attach clock devices to their power domains */
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
/* optional sorted table restricting which resources may expose clocks */
static const struct imx_clk_scu_rsrc_table *rsrc_table;
29 | |
/*
 * struct imx_scu_clk_node - platform data describing one SCU clock device
 * @name: clock name
 * @rsrc: SCU resource ID the clock belongs to
 * @clk_type: clock type index within the resource
 * @parents: parent clock names, or NULL for a fixed-parent clock
 * @num_parents: number of entries in @parents
 * @hw: registered clk_hw, filled in at probe time
 * @node: entry in the per-resource imx_scu_clks[] list
 */
struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

/* one list of registered clocks per SCU resource, indexed by resource ID */
struct list_head imx_scu_clks[IMX_SC_R_LAST];
42 | |
/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 * @parent: parent clk_hw saved at suspend, restored at resume
 * @parent_index: parent mux index saved at suspend, restored at resume
 * @is_enabled: enable state saved at suspend, restored at resume
 * @rate: clock rate saved at suspend, restored at resume
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore */
	struct clk_hw *parent;
	u8 parent_index;
	bool is_enabled;
	u32 rate;
};
60 | |
/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 * @flags: IMX_SCU_GPR_CLK_* bit selecting gate/div/mux behaviour
 * @gate_invert: true if the gate control bit is active-low
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)
76 | |
/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol of clock rate set.
 * Layout must match the firmware wire format exactly, hence
 * __packed with explicit 4-byte alignment.
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);
92 | |
/* request payload for clock rate get: resource ID plus clock type */
struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* response payload for clock rate get: the current rate in Hz */
struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol of clock rate get.
 * Request and response share the same buffer, hence the union.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};
117 | |
/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol (resource ID plus clock type)
 * @resp: get parent response protocol (parent mux index)
 *
 * This structure describes the SCU protocol of clock get parent.
 * Request and response share the same buffer, hence the union.
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};
138 | |
/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: parent mux index to select
 *
 * This structure describes the SCU protocol of clock set parent.
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;
152 | |
/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether gate off the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol of clock gate.
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);
170 | |
171 | static inline struct clk_scu *to_clk_scu(struct clk_hw *hw) |
172 | { |
173 | return container_of(hw, struct clk_scu, hw); |
174 | } |
175 | |
176 | static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p) |
177 | { |
178 | return *(u32 *)rsrc - *(u32 *)rsrc_p; |
179 | } |
180 | |
181 | static bool imx_scu_clk_is_valid(u32 rsrc_id) |
182 | { |
183 | void *p; |
184 | |
185 | if (!rsrc_table) |
186 | return true; |
187 | |
188 | p = bsearch(key: &rsrc_id, base: rsrc_table->rsrc, num: rsrc_table->num, |
189 | size: sizeof(rsrc_table->rsrc[0]), cmp: imx_scu_clk_search_cmp); |
190 | |
191 | return p != NULL; |
192 | } |
193 | |
194 | int imx_clk_scu_init(struct device_node *np, |
195 | const struct imx_clk_scu_rsrc_table *data) |
196 | { |
197 | u32 clk_cells; |
198 | int ret, i; |
199 | |
200 | ret = imx_scu_get_handle(ipc: &ccm_ipc_handle); |
201 | if (ret) |
202 | return ret; |
203 | |
204 | of_property_read_u32(np, propname: "#clock-cells" , out_value: &clk_cells); |
205 | |
206 | if (clk_cells == 2) { |
207 | for (i = 0; i < IMX_SC_R_LAST; i++) |
208 | INIT_LIST_HEAD(list: &imx_scu_clks[i]); |
209 | |
210 | /* pd_np will be used to attach power domains later */ |
211 | pd_np = of_find_compatible_node(NULL, NULL, compat: "fsl,scu-pd" ); |
212 | if (!pd_np) |
213 | return -EINVAL; |
214 | |
215 | rsrc_table = data; |
216 | } |
217 | |
218 | return platform_driver_register(&imx_clk_scu_driver); |
219 | } |
220 | |
221 | /* |
222 | * clk_scu_recalc_rate - Get clock rate for a SCU clock |
223 | * @hw: clock to get rate for |
224 | * @parent_rate: parent rate provided by common clock framework, not used |
225 | * |
226 | * Gets the current clock rate of a SCU clock. Returns the current |
227 | * clock rate, or zero in failure. |
228 | */ |
229 | static unsigned long clk_scu_recalc_rate(struct clk_hw *hw, |
230 | unsigned long parent_rate) |
231 | { |
232 | struct clk_scu *clk = to_clk_scu(hw); |
233 | struct imx_sc_msg_get_clock_rate msg; |
234 | struct imx_sc_rpc_msg *hdr = &msg.hdr; |
235 | int ret; |
236 | |
237 | hdr->ver = IMX_SC_RPC_VERSION; |
238 | hdr->svc = IMX_SC_RPC_SVC_PM; |
239 | hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE; |
240 | hdr->size = 2; |
241 | |
242 | msg.data.req.resource = cpu_to_le16(clk->rsrc_id); |
243 | msg.data.req.clk = clk->clk_type; |
244 | |
245 | ret = imx_scu_call_rpc(ipc: ccm_ipc_handle, msg: &msg, have_resp: true); |
246 | if (ret) { |
247 | pr_err("%s: failed to get clock rate %d\n" , |
248 | clk_hw_get_name(hw), ret); |
249 | return 0; |
250 | } |
251 | |
252 | return le32_to_cpu(msg.data.resp.rate); |
253 | } |
254 | |
/*
 * clk_scu_determine_rate - accept any requested rate for a SCU clock
 * @hw: clock to determine rate for
 * @req: clock rate request
 *
 * The SCU firmware performs the real rate rounding, so the request is
 * accepted unmodified here. Always returns 0.
 */
static int clk_scu_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* defer all rate rounding decisions to the SCU firmware */
	return 0;
}
271 | |
/*
 * clk_scu_round_rate - round a rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * The SCU firmware performs the real rate rounding, so the requested
 * rate is returned unchanged.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* defer all rate rounding decisions to the SCU firmware */
	return rate;
}
289 | |
290 | static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate, |
291 | unsigned long parent_rate) |
292 | { |
293 | struct clk_scu *clk = to_clk_scu(hw); |
294 | struct arm_smccc_res res; |
295 | unsigned long cluster_id; |
296 | |
297 | if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53) |
298 | cluster_id = 0; |
299 | else if (clk->rsrc_id == IMX_SC_R_A72) |
300 | cluster_id = 1; |
301 | else |
302 | return -EINVAL; |
303 | |
304 | /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */ |
305 | arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ, |
306 | cluster_id, rate, 0, 0, 0, 0, &res); |
307 | |
308 | return 0; |
309 | } |
310 | |
311 | /* |
312 | * clk_scu_set_rate - Set rate for a SCU clock |
313 | * @hw: clock to change rate for |
314 | * @rate: target rate for the clock |
315 | * @parent_rate: rate of the clock parent, not used for SCU clocks |
316 | * |
317 | * Sets a clock frequency for a SCU clock. Returns the SCU |
318 | * protocol status. |
319 | */ |
320 | static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate, |
321 | unsigned long parent_rate) |
322 | { |
323 | struct clk_scu *clk = to_clk_scu(hw); |
324 | struct imx_sc_msg_req_set_clock_rate msg; |
325 | struct imx_sc_rpc_msg *hdr = &msg.hdr; |
326 | |
327 | hdr->ver = IMX_SC_RPC_VERSION; |
328 | hdr->svc = IMX_SC_RPC_SVC_PM; |
329 | hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE; |
330 | hdr->size = 3; |
331 | |
332 | msg.rate = cpu_to_le32(rate); |
333 | msg.resource = cpu_to_le16(clk->rsrc_id); |
334 | msg.clk = clk->clk_type; |
335 | |
336 | return imx_scu_call_rpc(ipc: ccm_ipc_handle, msg: &msg, have_resp: true); |
337 | } |
338 | |
339 | static u8 clk_scu_get_parent(struct clk_hw *hw) |
340 | { |
341 | struct clk_scu *clk = to_clk_scu(hw); |
342 | struct imx_sc_msg_get_clock_parent msg; |
343 | struct imx_sc_rpc_msg *hdr = &msg.hdr; |
344 | int ret; |
345 | |
346 | hdr->ver = IMX_SC_RPC_VERSION; |
347 | hdr->svc = IMX_SC_RPC_SVC_PM; |
348 | hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT; |
349 | hdr->size = 2; |
350 | |
351 | msg.data.req.resource = cpu_to_le16(clk->rsrc_id); |
352 | msg.data.req.clk = clk->clk_type; |
353 | |
354 | ret = imx_scu_call_rpc(ipc: ccm_ipc_handle, msg: &msg, have_resp: true); |
355 | if (ret) { |
356 | pr_err("%s: failed to get clock parent %d\n" , |
357 | clk_hw_get_name(hw), ret); |
358 | return 0; |
359 | } |
360 | |
361 | clk->parent_index = msg.data.resp.parent; |
362 | |
363 | return msg.data.resp.parent; |
364 | } |
365 | |
366 | static int clk_scu_set_parent(struct clk_hw *hw, u8 index) |
367 | { |
368 | struct clk_scu *clk = to_clk_scu(hw); |
369 | struct imx_sc_msg_set_clock_parent msg; |
370 | struct imx_sc_rpc_msg *hdr = &msg.hdr; |
371 | int ret; |
372 | |
373 | hdr->ver = IMX_SC_RPC_VERSION; |
374 | hdr->svc = IMX_SC_RPC_SVC_PM; |
375 | hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT; |
376 | hdr->size = 2; |
377 | |
378 | msg.resource = cpu_to_le16(clk->rsrc_id); |
379 | msg.clk = clk->clk_type; |
380 | msg.parent = index; |
381 | |
382 | ret = imx_scu_call_rpc(ipc: ccm_ipc_handle, msg: &msg, have_resp: true); |
383 | if (ret) { |
384 | pr_err("%s: failed to set clock parent %d\n" , |
385 | clk_hw_get_name(hw), ret); |
386 | return ret; |
387 | } |
388 | |
389 | clk->parent_index = index; |
390 | |
391 | return 0; |
392 | } |
393 | |
394 | static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource, |
395 | u8 clk, bool enable, bool autog) |
396 | { |
397 | struct imx_sc_msg_req_clock_enable msg; |
398 | struct imx_sc_rpc_msg *hdr = &msg.hdr; |
399 | |
400 | hdr->ver = IMX_SC_RPC_VERSION; |
401 | hdr->svc = IMX_SC_RPC_SVC_PM; |
402 | hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE; |
403 | hdr->size = 3; |
404 | |
405 | msg.resource = cpu_to_le16(resource); |
406 | msg.clk = clk; |
407 | msg.enable = enable; |
408 | msg.autog = autog; |
409 | |
410 | return imx_scu_call_rpc(ipc: ccm_ipc_handle, msg: &msg, have_resp: true); |
411 | } |
412 | |
413 | /* |
414 | * clk_scu_prepare - Enable a SCU clock |
415 | * @hw: clock to enable |
416 | * |
417 | * Enable the clock at the DSC slice level |
418 | */ |
419 | static int clk_scu_prepare(struct clk_hw *hw) |
420 | { |
421 | struct clk_scu *clk = to_clk_scu(hw); |
422 | |
423 | return sc_pm_clock_enable(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
424 | clk: clk->clk_type, enable: true, autog: false); |
425 | } |
426 | |
427 | /* |
428 | * clk_scu_unprepare - Disable a SCU clock |
429 | * @hw: clock to enable |
430 | * |
431 | * Disable the clock at the DSC slice level |
432 | */ |
433 | static void clk_scu_unprepare(struct clk_hw *hw) |
434 | { |
435 | struct clk_scu *clk = to_clk_scu(hw); |
436 | int ret; |
437 | |
438 | ret = sc_pm_clock_enable(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
439 | clk: clk->clk_type, enable: false, autog: false); |
440 | if (ret) |
441 | pr_warn("%s: clk unprepare failed %d\n" , clk_hw_get_name(hw), |
442 | ret); |
443 | } |
444 | |
/* default ops for a generic SCU clock */
static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* ops for CPU clocks: rate changes must go through ARM Trusted Firmware */
static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* ops for the PI PLL: no gating, rate control only */
static const struct clk_ops clk_scu_pi_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
};
468 | |
469 | struct clk_hw *__imx_clk_scu(struct device *dev, const char *name, |
470 | const char * const *parents, int num_parents, |
471 | u32 rsrc_id, u8 clk_type) |
472 | { |
473 | struct clk_init_data init; |
474 | struct clk_scu *clk; |
475 | struct clk_hw *hw; |
476 | int ret; |
477 | |
478 | clk = kzalloc(size: sizeof(*clk), GFP_KERNEL); |
479 | if (!clk) |
480 | return ERR_PTR(error: -ENOMEM); |
481 | |
482 | clk->rsrc_id = rsrc_id; |
483 | clk->clk_type = clk_type; |
484 | |
485 | init.name = name; |
486 | init.ops = &clk_scu_ops; |
487 | if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72) |
488 | init.ops = &clk_scu_cpu_ops; |
489 | else if (rsrc_id == IMX_SC_R_PI_0_PLL) |
490 | init.ops = &clk_scu_pi_ops; |
491 | else |
492 | init.ops = &clk_scu_ops; |
493 | init.parent_names = parents; |
494 | init.num_parents = num_parents; |
495 | |
496 | /* |
497 | * Note on MX8, the clocks are tightly coupled with power domain |
498 | * that once the power domain is off, the clock status may be |
499 | * lost. So we make it NOCACHE to let user to retrieve the real |
500 | * clock status from HW instead of using the possible invalid |
501 | * cached rate. |
502 | */ |
503 | init.flags = CLK_GET_RATE_NOCACHE; |
504 | clk->hw.init = &init; |
505 | |
506 | hw = &clk->hw; |
507 | ret = clk_hw_register(dev, hw); |
508 | if (ret) { |
509 | kfree(objp: clk); |
510 | hw = ERR_PTR(error: ret); |
511 | return hw; |
512 | } |
513 | |
514 | if (dev) |
515 | dev_set_drvdata(dev, data: clk); |
516 | |
517 | return hw; |
518 | } |
519 | |
520 | struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec, |
521 | void *data) |
522 | { |
523 | unsigned int rsrc = clkspec->args[0]; |
524 | unsigned int idx = clkspec->args[1]; |
525 | struct list_head *scu_clks = data; |
526 | struct imx_scu_clk_node *clk; |
527 | |
528 | list_for_each_entry(clk, &scu_clks[rsrc], node) { |
529 | if (clk->clk_type == idx) |
530 | return clk->hw; |
531 | } |
532 | |
533 | return ERR_PTR(error: -ENODEV); |
534 | } |
535 | |
536 | static int imx_clk_scu_probe(struct platform_device *pdev) |
537 | { |
538 | struct device *dev = &pdev->dev; |
539 | struct imx_scu_clk_node *clk = dev_get_platdata(dev); |
540 | struct clk_hw *hw; |
541 | int ret; |
542 | |
543 | if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) || |
544 | (clk->rsrc == IMX_SC_R_A72))) { |
545 | pm_runtime_set_suspended(dev); |
546 | pm_runtime_set_autosuspend_delay(dev, delay: 50); |
547 | pm_runtime_use_autosuspend(dev: &pdev->dev); |
548 | pm_runtime_enable(dev); |
549 | |
550 | ret = pm_runtime_resume_and_get(dev); |
551 | if (ret) { |
552 | pm_genpd_remove_device(dev); |
553 | pm_runtime_disable(dev); |
554 | return ret; |
555 | } |
556 | } |
557 | |
558 | hw = __imx_clk_scu(dev, name: clk->name, parents: clk->parents, num_parents: clk->num_parents, |
559 | rsrc_id: clk->rsrc, clk_type: clk->clk_type); |
560 | if (IS_ERR(ptr: hw)) { |
561 | pm_runtime_disable(dev); |
562 | return PTR_ERR(ptr: hw); |
563 | } |
564 | |
565 | clk->hw = hw; |
566 | list_add_tail(new: &clk->node, head: &imx_scu_clks[clk->rsrc]); |
567 | |
568 | if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) || |
569 | (clk->rsrc == IMX_SC_R_A72))) { |
570 | pm_runtime_mark_last_busy(dev: &pdev->dev); |
571 | pm_runtime_put_autosuspend(dev: &pdev->dev); |
572 | } |
573 | |
574 | dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n" , clk->rsrc, |
575 | clk->clk_type); |
576 | |
577 | return 0; |
578 | } |
579 | |
580 | static int __maybe_unused imx_clk_scu_suspend(struct device *dev) |
581 | { |
582 | struct clk_scu *clk = dev_get_drvdata(dev); |
583 | u32 rsrc_id = clk->rsrc_id; |
584 | |
585 | if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) || |
586 | (rsrc_id == IMX_SC_R_A72)) |
587 | return 0; |
588 | |
589 | clk->parent = clk_hw_get_parent(hw: &clk->hw); |
590 | |
591 | /* DC SS needs to handle bypass clock using non-cached clock rate */ |
592 | if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 || |
593 | clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 || |
594 | clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 || |
595 | clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1) |
596 | clk->rate = clk_scu_recalc_rate(hw: &clk->hw, parent_rate: 0); |
597 | else |
598 | clk->rate = clk_hw_get_rate(hw: &clk->hw); |
599 | clk->is_enabled = clk_hw_is_enabled(hw: &clk->hw); |
600 | |
601 | if (clk->parent) |
602 | dev_dbg(dev, "save parent %s idx %u\n" , clk_hw_get_name(clk->parent), |
603 | clk->parent_index); |
604 | |
605 | if (clk->rate) |
606 | dev_dbg(dev, "save rate %d\n" , clk->rate); |
607 | |
608 | if (clk->is_enabled) |
609 | dev_dbg(dev, "save enabled state\n" ); |
610 | |
611 | return 0; |
612 | } |
613 | |
614 | static int __maybe_unused imx_clk_scu_resume(struct device *dev) |
615 | { |
616 | struct clk_scu *clk = dev_get_drvdata(dev); |
617 | u32 rsrc_id = clk->rsrc_id; |
618 | int ret = 0; |
619 | |
620 | if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) || |
621 | (rsrc_id == IMX_SC_R_A72)) |
622 | return 0; |
623 | |
624 | if (clk->parent) { |
625 | ret = clk_scu_set_parent(hw: &clk->hw, index: clk->parent_index); |
626 | dev_dbg(dev, "restore parent %s idx %u %s\n" , |
627 | clk_hw_get_name(clk->parent), |
628 | clk->parent_index, !ret ? "success" : "failed" ); |
629 | } |
630 | |
631 | if (clk->rate) { |
632 | ret = clk_scu_set_rate(hw: &clk->hw, rate: clk->rate, parent_rate: 0); |
633 | dev_dbg(dev, "restore rate %d %s\n" , clk->rate, |
634 | !ret ? "success" : "failed" ); |
635 | } |
636 | |
637 | if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) { |
638 | ret = clk_scu_prepare(hw: &clk->hw); |
639 | dev_dbg(dev, "restore enabled state %s\n" , |
640 | !ret ? "success" : "failed" ); |
641 | } |
642 | |
643 | return ret; |
644 | } |
645 | |
/* noirq-phase sleep ops: run after/before regular device suspend/resume */
static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

/* matched via driver_override set in imx_clk_scu_alloc_dev() */
static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};
659 | |
660 | static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id) |
661 | { |
662 | struct of_phandle_args genpdspec = { |
663 | .np = pd_np, |
664 | .args_count = 1, |
665 | .args[0] = rsrc_id, |
666 | }; |
667 | |
668 | if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || |
669 | rsrc_id == IMX_SC_R_A72) |
670 | return 0; |
671 | |
672 | return of_genpd_add_device(args: &genpdspec, dev); |
673 | } |
674 | |
675 | static bool imx_clk_is_resource_owned(u32 rsrc) |
676 | { |
677 | /* |
678 | * A-core resources are special. SCFW reports they are not "owned" by |
679 | * current partition but linux can still adjust them for cpufreq. |
680 | */ |
681 | if (rsrc == IMX_SC_R_A53 || rsrc == IMX_SC_R_A72 || rsrc == IMX_SC_R_A35) |
682 | return true; |
683 | |
684 | return imx_sc_rm_is_resource_owned(ipc: ccm_ipc_handle, resource: rsrc); |
685 | } |
686 | |
687 | struct clk_hw *imx_clk_scu_alloc_dev(const char *name, |
688 | const char * const *parents, |
689 | int num_parents, u32 rsrc_id, u8 clk_type) |
690 | { |
691 | struct imx_scu_clk_node clk = { |
692 | .name = name, |
693 | .rsrc = rsrc_id, |
694 | .clk_type = clk_type, |
695 | .parents = parents, |
696 | .num_parents = num_parents, |
697 | }; |
698 | struct platform_device *pdev; |
699 | int ret; |
700 | |
701 | if (!imx_scu_clk_is_valid(rsrc_id)) |
702 | return ERR_PTR(error: -EINVAL); |
703 | |
704 | if (!imx_clk_is_resource_owned(rsrc: rsrc_id)) |
705 | return NULL; |
706 | |
707 | pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE); |
708 | if (!pdev) { |
709 | pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n" , |
710 | name, rsrc_id, clk_type); |
711 | return ERR_PTR(error: -ENOMEM); |
712 | } |
713 | |
714 | ret = platform_device_add_data(pdev, data: &clk, size: sizeof(clk)); |
715 | if (ret) { |
716 | platform_device_put(pdev); |
717 | return ERR_PTR(error: ret); |
718 | } |
719 | |
720 | ret = driver_set_override(dev: &pdev->dev, override: &pdev->driver_override, |
721 | s: "imx-scu-clk" , strlen("imx-scu-clk" )); |
722 | if (ret) { |
723 | platform_device_put(pdev); |
724 | return ERR_PTR(error: ret); |
725 | } |
726 | |
727 | ret = imx_clk_scu_attach_pd(dev: &pdev->dev, rsrc_id); |
728 | if (ret) |
729 | pr_warn("%s: failed to attached the power domain %d\n" , |
730 | name, ret); |
731 | |
732 | ret = platform_device_add(pdev); |
733 | if (ret) { |
734 | platform_device_put(pdev); |
735 | return ERR_PTR(error: ret); |
736 | } |
737 | |
738 | /* For API backwards compatiblilty, simply return NULL for success */ |
739 | return NULL; |
740 | } |
741 | |
742 | void imx_clk_scu_unregister(void) |
743 | { |
744 | struct imx_scu_clk_node *clk, *n; |
745 | int i; |
746 | |
747 | for (i = 0; i < IMX_SC_R_LAST; i++) { |
748 | list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) { |
749 | clk_hw_unregister(hw: clk->hw); |
750 | kfree(objp: clk); |
751 | } |
752 | } |
753 | } |
754 | |
755 | static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw, |
756 | unsigned long parent_rate) |
757 | { |
758 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
759 | unsigned long rate = 0; |
760 | u32 val; |
761 | int err; |
762 | |
763 | err = imx_sc_misc_get_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
764 | ctrl: clk->gpr_id, val: &val); |
765 | |
766 | rate = val ? parent_rate / 2 : parent_rate; |
767 | |
768 | return err ? 0 : rate; |
769 | } |
770 | |
/*
 * clk_gpr_div_scu_round_rate - round to one of the two supported rates
 * @hw: clock to round rate for
 * @rate: requested rate
 * @prate: parent rate
 *
 * The divider only supports bypass or divide-by-2, so anything below
 * the parent rate rounds to half of it.
 */
static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *prate)
{
	return (rate < *prate) ? *prate / 2 : *prate;
}
781 | |
782 | static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate, |
783 | unsigned long parent_rate) |
784 | { |
785 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
786 | uint32_t val; |
787 | int err; |
788 | |
789 | val = (rate < parent_rate) ? 1 : 0; |
790 | err = imx_sc_misc_set_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
791 | ctrl: clk->gpr_id, val); |
792 | |
793 | return err ? -EINVAL : 0; |
794 | } |
795 | |
/* ops for GPR-controlled divide-by-2 clocks (IMX_SCU_GPR_CLK_DIV) */
static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.round_rate = clk_gpr_div_scu_round_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};
801 | |
802 | static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw) |
803 | { |
804 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
805 | u32 val = 0; |
806 | |
807 | imx_sc_misc_get_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
808 | ctrl: clk->gpr_id, val: &val); |
809 | |
810 | return (u8)val; |
811 | } |
812 | |
813 | static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index) |
814 | { |
815 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
816 | |
817 | return imx_sc_misc_set_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
818 | ctrl: clk->gpr_id, val: index); |
819 | } |
820 | |
/* ops for GPR-controlled mux clocks (IMX_SCU_GPR_CLK_MUX) */
static const struct clk_ops clk_gpr_mux_scu_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};
826 | |
827 | static int clk_gpr_gate_scu_prepare(struct clk_hw *hw) |
828 | { |
829 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
830 | |
831 | return imx_sc_misc_set_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
832 | ctrl: clk->gpr_id, val: !clk->gate_invert); |
833 | } |
834 | |
835 | static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw) |
836 | { |
837 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
838 | int ret; |
839 | |
840 | ret = imx_sc_misc_set_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
841 | ctrl: clk->gpr_id, val: clk->gate_invert); |
842 | if (ret) |
843 | pr_err("%s: clk unprepare failed %d\n" , clk_hw_get_name(hw), |
844 | ret); |
845 | } |
846 | |
847 | static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw) |
848 | { |
849 | struct clk_gpr_scu *clk = to_clk_gpr_scu(hw); |
850 | int ret; |
851 | u32 val; |
852 | |
853 | ret = imx_sc_misc_get_control(ipc: ccm_ipc_handle, resource: clk->rsrc_id, |
854 | ctrl: clk->gpr_id, val: &val); |
855 | if (ret) |
856 | return ret; |
857 | |
858 | return clk->gate_invert ? !val : val; |
859 | } |
860 | |
/* ops for GPR-controlled gate clocks (IMX_SCU_GPR_CLK_GATE) */
static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};
866 | |
867 | struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name, |
868 | int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags, |
869 | bool invert) |
870 | { |
871 | struct imx_scu_clk_node *clk_node; |
872 | struct clk_gpr_scu *clk; |
873 | struct clk_hw *hw; |
874 | struct clk_init_data init; |
875 | int ret; |
876 | |
877 | if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST) |
878 | return ERR_PTR(error: -EINVAL); |
879 | |
880 | clk_node = kzalloc(size: sizeof(*clk_node), GFP_KERNEL); |
881 | if (!clk_node) |
882 | return ERR_PTR(error: -ENOMEM); |
883 | |
884 | if (!imx_scu_clk_is_valid(rsrc_id)) { |
885 | kfree(objp: clk_node); |
886 | return ERR_PTR(error: -EINVAL); |
887 | } |
888 | |
889 | if (!imx_clk_is_resource_owned(rsrc: rsrc_id)) |
890 | return NULL; |
891 | |
892 | clk = kzalloc(size: sizeof(*clk), GFP_KERNEL); |
893 | if (!clk) { |
894 | kfree(objp: clk_node); |
895 | return ERR_PTR(error: -ENOMEM); |
896 | } |
897 | |
898 | clk->rsrc_id = rsrc_id; |
899 | clk->gpr_id = gpr_id; |
900 | clk->flags = flags; |
901 | clk->gate_invert = invert; |
902 | |
903 | if (flags & IMX_SCU_GPR_CLK_GATE) |
904 | init.ops = &clk_gpr_gate_scu_ops; |
905 | |
906 | if (flags & IMX_SCU_GPR_CLK_DIV) |
907 | init.ops = &clk_gpr_div_scu_ops; |
908 | |
909 | if (flags & IMX_SCU_GPR_CLK_MUX) |
910 | init.ops = &clk_gpr_mux_scu_ops; |
911 | |
912 | init.flags = 0; |
913 | init.name = name; |
914 | init.parent_names = parent_name; |
915 | init.num_parents = num_parents; |
916 | |
917 | clk->hw.init = &init; |
918 | |
919 | hw = &clk->hw; |
920 | ret = clk_hw_register(NULL, hw); |
921 | if (ret) { |
922 | kfree(objp: clk); |
923 | kfree(objp: clk_node); |
924 | hw = ERR_PTR(error: ret); |
925 | } else { |
926 | clk_node->hw = hw; |
927 | clk_node->clk_type = gpr_id; |
928 | list_add_tail(new: &clk_node->node, head: &imx_scu_clks[rsrc_id]); |
929 | } |
930 | |
931 | return hw; |
932 | } |
933 | |