1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> |
4 | * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> |
5 | * |
6 | * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst |
7 | */ |
8 | |
9 | #include <linux/clk.h> |
10 | #include <linux/clk-provider.h> |
11 | #include <linux/clk/clk-conf.h> |
12 | #include <linux/module.h> |
13 | #include <linux/mutex.h> |
14 | #include <linux/spinlock.h> |
15 | #include <linux/err.h> |
16 | #include <linux/list.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/of.h> |
19 | #include <linux/device.h> |
20 | #include <linux/init.h> |
21 | #include <linux/pm_runtime.h> |
22 | #include <linux/sched.h> |
23 | #include <linux/clkdev.h> |
24 | |
25 | #include "clk.h" |
26 | |
27 | static DEFINE_SPINLOCK(enable_lock); |
28 | static DEFINE_MUTEX(prepare_lock); |
29 | |
30 | static struct task_struct *prepare_owner; |
31 | static struct task_struct *enable_owner; |
32 | |
33 | static int prepare_refcnt; |
34 | static int enable_refcnt; |
35 | |
36 | static HLIST_HEAD(clk_root_list); |
37 | static HLIST_HEAD(clk_orphan_list); |
38 | static LIST_HEAD(clk_notifier_list); |
39 | |
40 | /*** private data structures ***/ |
41 | |
42 | struct clk_core { |
43 | const char *name; |
44 | const struct clk_ops *ops; |
45 | struct clk_hw *hw; |
46 | struct module *owner; |
47 | struct device *dev; |
48 | struct clk_core *parent; |
49 | const char **parent_names; |
50 | struct clk_core **parents; |
51 | u8 num_parents; |
52 | u8 new_parent_index; |
53 | unsigned long rate; |
54 | unsigned long req_rate; |
55 | unsigned long new_rate; |
56 | struct clk_core *new_parent; |
57 | struct clk_core *new_child; |
58 | unsigned long flags; |
59 | bool orphan; |
60 | bool rpm_enabled; |
61 | unsigned int enable_count; |
62 | unsigned int prepare_count; |
63 | unsigned int protect_count; |
64 | unsigned long min_rate; |
65 | unsigned long max_rate; |
66 | unsigned long accuracy; |
67 | int phase; |
68 | struct clk_duty duty; |
69 | struct hlist_head children; |
70 | struct hlist_node child_node; |
71 | struct hlist_head clks; |
72 | unsigned int notifier_count; |
73 | #ifdef CONFIG_DEBUG_FS |
74 | struct dentry *dentry; |
75 | struct hlist_node debug_node; |
76 | #endif |
77 | struct kref ref; |
78 | }; |
79 | |
80 | #define CREATE_TRACE_POINTS |
81 | #include <trace/events/clk.h> |
82 | |
83 | struct clk { |
84 | struct clk_core *core; |
85 | struct device *dev; |
86 | const char *dev_id; |
87 | const char *con_id; |
88 | unsigned long min_rate; |
89 | unsigned long max_rate; |
90 | unsigned int exclusive_count; |
91 | struct hlist_node clks_node; |
92 | }; |
93 | |
94 | /*** runtime pm ***/ |
95 | static int clk_pm_runtime_get(struct clk_core *core) |
96 | { |
97 | int ret; |
98 | |
99 | if (!core->rpm_enabled) |
100 | return 0; |
101 | |
102 | ret = pm_runtime_get_sync(core->dev); |
103 | return ret < 0 ? ret : 0; |
104 | } |
105 | |
106 | static void clk_pm_runtime_put(struct clk_core *core) |
107 | { |
108 | if (!core->rpm_enabled) |
109 | return; |
110 | |
111 | pm_runtime_put_sync(core->dev); |
112 | } |
113 | |
114 | /*** locking ***/ |
115 | static void clk_prepare_lock(void) |
116 | { |
117 | if (!mutex_trylock(&prepare_lock)) { |
118 | if (prepare_owner == current) { |
119 | prepare_refcnt++; |
120 | return; |
121 | } |
122 | mutex_lock(&prepare_lock); |
123 | } |
124 | WARN_ON_ONCE(prepare_owner != NULL); |
125 | WARN_ON_ONCE(prepare_refcnt != 0); |
126 | prepare_owner = current; |
127 | prepare_refcnt = 1; |
128 | } |
129 | |
130 | static void clk_prepare_unlock(void) |
131 | { |
132 | WARN_ON_ONCE(prepare_owner != current); |
133 | WARN_ON_ONCE(prepare_refcnt == 0); |
134 | |
135 | if (--prepare_refcnt) |
136 | return; |
137 | prepare_owner = NULL; |
138 | mutex_unlock(&prepare_lock); |
139 | } |
140 | |
141 | static unsigned long clk_enable_lock(void) |
142 | __acquires(enable_lock) |
143 | { |
144 | unsigned long flags; |
145 | |
146 | /* |
147 | * On UP systems, spin_trylock_irqsave() always returns true, even if |
148 | * we already hold the lock. So, in that case, we rely only on |
149 | * reference counting. |
150 | */ |
151 | if (!IS_ENABLED(CONFIG_SMP) || |
152 | !spin_trylock_irqsave(&enable_lock, flags)) { |
153 | if (enable_owner == current) { |
154 | enable_refcnt++; |
155 | __acquire(enable_lock); |
156 | if (!IS_ENABLED(CONFIG_SMP)) |
157 | local_save_flags(flags); |
158 | return flags; |
159 | } |
160 | spin_lock_irqsave(&enable_lock, flags); |
161 | } |
162 | WARN_ON_ONCE(enable_owner != NULL); |
163 | WARN_ON_ONCE(enable_refcnt != 0); |
164 | enable_owner = current; |
165 | enable_refcnt = 1; |
166 | return flags; |
167 | } |
168 | |
169 | static void clk_enable_unlock(unsigned long flags) |
170 | __releases(enable_lock) |
171 | { |
172 | WARN_ON_ONCE(enable_owner != current); |
173 | WARN_ON_ONCE(enable_refcnt == 0); |
174 | |
175 | if (--enable_refcnt) { |
176 | __release(enable_lock); |
177 | return; |
178 | } |
179 | enable_owner = NULL; |
180 | spin_unlock_irqrestore(&enable_lock, flags); |
181 | } |
182 | |
183 | static bool clk_core_rate_is_protected(struct clk_core *core) |
184 | { |
185 | return core->protect_count; |
186 | } |
187 | |
188 | static bool clk_core_is_prepared(struct clk_core *core) |
189 | { |
190 | bool ret = false; |
191 | |
192 | /* |
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
195 | */ |
196 | if (!core->ops->is_prepared) |
197 | return core->prepare_count; |
198 | |
199 | if (!clk_pm_runtime_get(core)) { |
200 | ret = core->ops->is_prepared(core->hw); |
201 | clk_pm_runtime_put(core); |
202 | } |
203 | |
204 | return ret; |
205 | } |
206 | |
207 | static bool clk_core_is_enabled(struct clk_core *core) |
208 | { |
209 | bool ret = false; |
210 | |
211 | /* |
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
214 | */ |
215 | if (!core->ops->is_enabled) |
216 | return core->enable_count; |
217 | |
218 | /* |
219 | * Check if clock controller's device is runtime active before |
220 | * calling .is_enabled callback. If not, assume that clock is |
221 | * disabled, because we might be called from atomic context, from |
222 | * which pm_runtime_get() is not allowed. |
223 | * This function is called mainly from clk_disable_unused_subtree, |
224 | * which ensures proper runtime pm activation of controller before |
225 | * taking enable spinlock, but the below check is needed if one tries |
226 | * to call it from other places. |
227 | */ |
228 | if (core->rpm_enabled) { |
229 | pm_runtime_get_noresume(core->dev); |
230 | if (!pm_runtime_active(core->dev)) { |
231 | ret = false; |
232 | goto done; |
233 | } |
234 | } |
235 | |
236 | ret = core->ops->is_enabled(core->hw); |
237 | done: |
238 | if (core->rpm_enabled) |
239 | pm_runtime_put(core->dev); |
240 | |
241 | return ret; |
242 | } |
243 | |
244 | /*** helper functions ***/ |
245 | |
246 | const char *__clk_get_name(const struct clk *clk) |
247 | { |
248 | return !clk ? NULL : clk->core->name; |
249 | } |
250 | EXPORT_SYMBOL_GPL(__clk_get_name); |
251 | |
252 | const char *clk_hw_get_name(const struct clk_hw *hw) |
253 | { |
254 | return hw->core->name; |
255 | } |
256 | EXPORT_SYMBOL_GPL(clk_hw_get_name); |
257 | |
258 | struct clk_hw *__clk_get_hw(struct clk *clk) |
259 | { |
260 | return !clk ? NULL : clk->core->hw; |
261 | } |
262 | EXPORT_SYMBOL_GPL(__clk_get_hw); |
263 | |
264 | unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) |
265 | { |
266 | return hw->core->num_parents; |
267 | } |
268 | EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); |
269 | |
270 | struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) |
271 | { |
272 | return hw->core->parent ? hw->core->parent->hw : NULL; |
273 | } |
274 | EXPORT_SYMBOL_GPL(clk_hw_get_parent); |
275 | |
276 | static struct clk_core *__clk_lookup_subtree(const char *name, |
277 | struct clk_core *core) |
278 | { |
279 | struct clk_core *child; |
280 | struct clk_core *ret; |
281 | |
282 | if (!strcmp(core->name, name)) |
283 | return core; |
284 | |
285 | hlist_for_each_entry(child, &core->children, child_node) { |
286 | ret = __clk_lookup_subtree(name, child); |
287 | if (ret) |
288 | return ret; |
289 | } |
290 | |
291 | return NULL; |
292 | } |
293 | |
294 | static struct clk_core *clk_core_lookup(const char *name) |
295 | { |
296 | struct clk_core *root_clk; |
297 | struct clk_core *ret; |
298 | |
299 | if (!name) |
300 | return NULL; |
301 | |
302 | /* search the 'proper' clk tree first */ |
303 | hlist_for_each_entry(root_clk, &clk_root_list, child_node) { |
304 | ret = __clk_lookup_subtree(name, root_clk); |
305 | if (ret) |
306 | return ret; |
307 | } |
308 | |
309 | /* if not found, then search the orphan tree */ |
310 | hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { |
311 | ret = __clk_lookup_subtree(name, root_clk); |
312 | if (ret) |
313 | return ret; |
314 | } |
315 | |
316 | return NULL; |
317 | } |
318 | |
319 | static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, |
320 | u8 index) |
321 | { |
322 | if (!core || index >= core->num_parents) |
323 | return NULL; |
324 | |
325 | if (!core->parents[index]) |
326 | core->parents[index] = |
327 | clk_core_lookup(core->parent_names[index]); |
328 | |
329 | return core->parents[index]; |
330 | } |
331 | |
332 | struct clk_hw * |
333 | clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) |
334 | { |
335 | struct clk_core *parent; |
336 | |
337 | parent = clk_core_get_parent_by_index(hw->core, index); |
338 | |
339 | return !parent ? NULL : parent->hw; |
340 | } |
341 | EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); |
342 | |
343 | unsigned int __clk_get_enable_count(struct clk *clk) |
344 | { |
345 | return !clk ? 0 : clk->core->enable_count; |
346 | } |
347 | |
348 | static unsigned long clk_core_get_rate_nolock(struct clk_core *core) |
349 | { |
350 | unsigned long ret; |
351 | |
352 | if (!core) { |
353 | ret = 0; |
354 | goto out; |
355 | } |
356 | |
357 | ret = core->rate; |
358 | |
359 | if (!core->num_parents) |
360 | goto out; |
361 | |
362 | if (!core->parent) |
363 | ret = 0; |
364 | |
365 | out: |
366 | return ret; |
367 | } |
368 | |
369 | unsigned long clk_hw_get_rate(const struct clk_hw *hw) |
370 | { |
371 | return clk_core_get_rate_nolock(hw->core); |
372 | } |
373 | EXPORT_SYMBOL_GPL(clk_hw_get_rate); |
374 | |
375 | static unsigned long __clk_get_accuracy(struct clk_core *core) |
376 | { |
377 | if (!core) |
378 | return 0; |
379 | |
380 | return core->accuracy; |
381 | } |
382 | |
383 | unsigned long __clk_get_flags(struct clk *clk) |
384 | { |
385 | return !clk ? 0 : clk->core->flags; |
386 | } |
387 | EXPORT_SYMBOL_GPL(__clk_get_flags); |
388 | |
389 | unsigned long clk_hw_get_flags(const struct clk_hw *hw) |
390 | { |
391 | return hw->core->flags; |
392 | } |
393 | EXPORT_SYMBOL_GPL(clk_hw_get_flags); |
394 | |
395 | bool clk_hw_is_prepared(const struct clk_hw *hw) |
396 | { |
397 | return clk_core_is_prepared(hw->core); |
398 | } |
399 | EXPORT_SYMBOL_GPL(clk_hw_is_prepared); |
400 | |
401 | bool clk_hw_rate_is_protected(const struct clk_hw *hw) |
402 | { |
403 | return clk_core_rate_is_protected(hw->core); |
404 | } |
405 | EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); |
406 | |
407 | bool clk_hw_is_enabled(const struct clk_hw *hw) |
408 | { |
409 | return clk_core_is_enabled(hw->core); |
410 | } |
411 | EXPORT_SYMBOL_GPL(clk_hw_is_enabled); |
412 | |
413 | bool __clk_is_enabled(struct clk *clk) |
414 | { |
415 | if (!clk) |
416 | return false; |
417 | |
418 | return clk_core_is_enabled(clk->core); |
419 | } |
420 | EXPORT_SYMBOL_GPL(__clk_is_enabled); |
421 | |
422 | static bool mux_is_better_rate(unsigned long rate, unsigned long now, |
423 | unsigned long best, unsigned long flags) |
424 | { |
425 | if (flags & CLK_MUX_ROUND_CLOSEST) |
426 | return abs(now - rate) < abs(best - rate); |
427 | |
428 | return now <= rate && now > best; |
429 | } |
430 | |
431 | int clk_mux_determine_rate_flags(struct clk_hw *hw, |
432 | struct clk_rate_request *req, |
433 | unsigned long flags) |
434 | { |
435 | struct clk_core *core = hw->core, *parent, *best_parent = NULL; |
436 | int i, num_parents, ret; |
437 | unsigned long best = 0; |
438 | struct clk_rate_request parent_req = *req; |
439 | |
440 | /* if NO_REPARENT flag set, pass through to current parent */ |
441 | if (core->flags & CLK_SET_RATE_NO_REPARENT) { |
442 | parent = core->parent; |
443 | if (core->flags & CLK_SET_RATE_PARENT) { |
444 | ret = __clk_determine_rate(parent ? parent->hw : NULL, |
445 | &parent_req); |
446 | if (ret) |
447 | return ret; |
448 | |
449 | best = parent_req.rate; |
450 | } else if (parent) { |
451 | best = clk_core_get_rate_nolock(parent); |
452 | } else { |
453 | best = clk_core_get_rate_nolock(core); |
454 | } |
455 | |
456 | goto out; |
457 | } |
458 | |
459 | /* find the parent that can provide the fastest rate <= rate */ |
460 | num_parents = core->num_parents; |
461 | for (i = 0; i < num_parents; i++) { |
462 | parent = clk_core_get_parent_by_index(core, i); |
463 | if (!parent) |
464 | continue; |
465 | |
466 | if (core->flags & CLK_SET_RATE_PARENT) { |
467 | parent_req = *req; |
468 | ret = __clk_determine_rate(parent->hw, &parent_req); |
469 | if (ret) |
470 | continue; |
471 | } else { |
472 | parent_req.rate = clk_core_get_rate_nolock(parent); |
473 | } |
474 | |
475 | if (mux_is_better_rate(req->rate, parent_req.rate, |
476 | best, flags)) { |
477 | best_parent = parent; |
478 | best = parent_req.rate; |
479 | } |
480 | } |
481 | |
482 | if (!best_parent) |
483 | return -EINVAL; |
484 | |
485 | out: |
486 | if (best_parent) |
487 | req->best_parent_hw = best_parent->hw; |
488 | req->best_parent_rate = best; |
489 | req->rate = best; |
490 | |
491 | return 0; |
492 | } |
493 | EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); |
494 | |
495 | struct clk *__clk_lookup(const char *name) |
496 | { |
497 | struct clk_core *core = clk_core_lookup(name); |
498 | |
499 | return !core ? NULL : core->hw->clk; |
500 | } |
501 | |
502 | static void clk_core_get_boundaries(struct clk_core *core, |
503 | unsigned long *min_rate, |
504 | unsigned long *max_rate) |
505 | { |
506 | struct clk *clk_user; |
507 | |
508 | *min_rate = core->min_rate; |
509 | *max_rate = core->max_rate; |
510 | |
511 | hlist_for_each_entry(clk_user, &core->clks, clks_node) |
512 | *min_rate = max(*min_rate, clk_user->min_rate); |
513 | |
514 | hlist_for_each_entry(clk_user, &core->clks, clks_node) |
515 | *max_rate = min(*max_rate, clk_user->max_rate); |
516 | } |
517 | |
518 | void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, |
519 | unsigned long max_rate) |
520 | { |
521 | hw->core->min_rate = min_rate; |
522 | hw->core->max_rate = max_rate; |
523 | } |
524 | EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); |
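
/*
 * Illustrative sketch (not part of this file): a provider that knows its
 * hardware limits can pin them right after registering the clock. The
 * "priv->pll_hw" field and the 600 MHz - 1.2 GHz window are made-up
 * example values.
 *
 *	clk_hw_set_rate_range(&priv->pll_hw, 600000000, 1200000000);
 */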
525 | |
526 | /* |
527 | * Helper for finding best parent to provide a given frequency. This can be used |
528 | * directly as a determine_rate callback (e.g. for a mux), or from a more |
529 | * complex clock that may combine a mux with other operations. |
530 | */ |
531 | int __clk_mux_determine_rate(struct clk_hw *hw, |
532 | struct clk_rate_request *req) |
533 | { |
534 | return clk_mux_determine_rate_flags(hw, req, 0); |
535 | } |
536 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); |
537 | |
538 | int __clk_mux_determine_rate_closest(struct clk_hw *hw, |
539 | struct clk_rate_request *req) |
540 | { |
541 | return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); |
542 | } |
543 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); |
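
/*
 * Illustrative sketch: a mux provider can plug the helpers above straight
 * into its clk_ops. The example_mux_* callbacks named here are hypothetical
 * driver functions, shown only to indicate where the helper fits.
 *
 *	static const struct clk_ops example_mux_ops = {
 *		.get_parent	= example_mux_get_parent,
 *		.set_parent	= example_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */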
544 | |
545 | /*** clk api ***/ |
546 | |
547 | static void clk_core_rate_unprotect(struct clk_core *core) |
548 | { |
549 | lockdep_assert_held(&prepare_lock); |
550 | |
551 | if (!core) |
552 | return; |
553 | |
554 | if (WARN(core->protect_count == 0, |
		 "%s already unprotected\n", core->name))
556 | return; |
557 | |
558 | if (--core->protect_count > 0) |
559 | return; |
560 | |
561 | clk_core_rate_unprotect(core->parent); |
562 | } |
563 | |
564 | static int clk_core_rate_nuke_protect(struct clk_core *core) |
565 | { |
566 | int ret; |
567 | |
568 | lockdep_assert_held(&prepare_lock); |
569 | |
570 | if (!core) |
571 | return -EINVAL; |
572 | |
573 | if (core->protect_count == 0) |
574 | return 0; |
575 | |
576 | ret = core->protect_count; |
577 | core->protect_count = 1; |
578 | clk_core_rate_unprotect(core); |
579 | |
580 | return ret; |
581 | } |
582 | |
583 | /** |
584 | * clk_rate_exclusive_put - release exclusivity over clock rate control |
585 | * @clk: the clk over which the exclusivity is released |
586 | * |
587 | * clk_rate_exclusive_put() completes a critical section during which a clock |
588 | * consumer cannot tolerate any other consumer making any operation on the |
589 | * clock which could result in a rate change or rate glitch. Exclusive clocks |
590 | * cannot have their rate changed, either directly or indirectly due to changes |
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also come under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
595 | * the rate effectively gets locked as exclusivity can't be preempted. |
596 | * |
597 | * Calls to clk_rate_exclusive_put() must be balanced with calls to |
598 | * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return |
599 | * error status. |
600 | */ |
601 | void clk_rate_exclusive_put(struct clk *clk) |
602 | { |
603 | if (!clk) |
604 | return; |
605 | |
606 | clk_prepare_lock(); |
607 | |
608 | /* |
609 | * if there is something wrong with this consumer protect count, stop |
610 | * here before messing with the provider |
611 | */ |
612 | if (WARN_ON(clk->exclusive_count <= 0)) |
613 | goto out; |
614 | |
615 | clk_core_rate_unprotect(clk->core); |
616 | clk->exclusive_count--; |
617 | out: |
618 | clk_prepare_unlock(); |
619 | } |
620 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); |
621 | |
622 | static void clk_core_rate_protect(struct clk_core *core) |
623 | { |
624 | lockdep_assert_held(&prepare_lock); |
625 | |
626 | if (!core) |
627 | return; |
628 | |
629 | if (core->protect_count == 0) |
630 | clk_core_rate_protect(core->parent); |
631 | |
632 | core->protect_count++; |
633 | } |
634 | |
635 | static void clk_core_rate_restore_protect(struct clk_core *core, int count) |
636 | { |
637 | lockdep_assert_held(&prepare_lock); |
638 | |
639 | if (!core) |
640 | return; |
641 | |
642 | if (count == 0) |
643 | return; |
644 | |
645 | clk_core_rate_protect(core); |
646 | core->protect_count = count; |
647 | } |
648 | |
649 | /** |
650 | * clk_rate_exclusive_get - get exclusivity over the clk rate control |
 * @clk: the clk over which exclusivity of rate control is requested
652 | * |
 * clk_rate_exclusive_get() begins a critical section during which a clock
654 | * consumer cannot tolerate any other consumer making any operation on the |
655 | * clock which could result in a rate change or rate glitch. Exclusive clocks |
656 | * cannot have their rate changed, either directly or indirectly due to changes |
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also come under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same consumer,
661 | * the rate effectively gets locked as exclusivity can't be preempted. |
662 | * |
663 | * Calls to clk_rate_exclusive_get() should be balanced with calls to |
664 | * clk_rate_exclusive_put(). Calls to this function may sleep. |
 * Returns 0 on success, a negative error code otherwise
666 | */ |
667 | int clk_rate_exclusive_get(struct clk *clk) |
668 | { |
669 | if (!clk) |
670 | return 0; |
671 | |
672 | clk_prepare_lock(); |
673 | clk_core_rate_protect(clk->core); |
674 | clk->exclusive_count++; |
675 | clk_prepare_unlock(); |
676 | |
677 | return 0; |
678 | } |
679 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); |
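
/*
 * Illustrative consumer-side sketch of the get/put pairing described above
 * (the device, the "baud" clock name and the 48 MHz rate are assumptions
 * for the example only). clk_set_rate() is still allowed here because the
 * caller itself holds the exclusivity.
 *
 *	struct clk *clk;
 *	int ret;
 *
 *	clk = devm_clk_get(dev, "baud");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 48000000);
 *	... use the clock while no other consumer can disturb its rate ...
 *	clk_rate_exclusive_put(clk);
 */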
680 | |
681 | static void clk_core_unprepare(struct clk_core *core) |
682 | { |
683 | lockdep_assert_held(&prepare_lock); |
684 | |
685 | if (!core) |
686 | return; |
687 | |
688 | if (WARN(core->prepare_count == 0, |
		 "%s already unprepared\n", core->name))
690 | return; |
691 | |
692 | if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, |
		 "Unpreparing critical %s\n", core->name))
694 | return; |
695 | |
696 | if (core->flags & CLK_SET_RATE_GATE) |
697 | clk_core_rate_unprotect(core); |
698 | |
699 | if (--core->prepare_count > 0) |
700 | return; |
701 | |
	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
703 | |
704 | trace_clk_unprepare(core); |
705 | |
706 | if (core->ops->unprepare) |
707 | core->ops->unprepare(core->hw); |
708 | |
709 | clk_pm_runtime_put(core); |
710 | |
711 | trace_clk_unprepare_complete(core); |
712 | clk_core_unprepare(core->parent); |
713 | } |
714 | |
715 | static void clk_core_unprepare_lock(struct clk_core *core) |
716 | { |
717 | clk_prepare_lock(); |
718 | clk_core_unprepare(core); |
719 | clk_prepare_unlock(); |
720 | } |
721 | |
722 | /** |
723 | * clk_unprepare - undo preparation of a clock source |
724 | * @clk: the clk being unprepared |
725 | * |
726 | * clk_unprepare may sleep, which differentiates it from clk_disable. In a |
727 | * simple case, clk_unprepare can be used instead of clk_disable to gate a clk |
728 | * if the operation may sleep. One example is a clk which is accessed over |
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. For this reason, clk_unprepare and clk_disable are not mutually
731 | * exclusive. In fact clk_disable must be called before clk_unprepare. |
732 | */ |
733 | void clk_unprepare(struct clk *clk) |
734 | { |
735 | if (IS_ERR_OR_NULL(clk)) |
736 | return; |
737 | |
738 | clk_core_unprepare_lock(clk->core); |
739 | } |
740 | EXPORT_SYMBOL_GPL(clk_unprepare); |
741 | |
742 | static int clk_core_prepare(struct clk_core *core) |
743 | { |
744 | int ret = 0; |
745 | |
746 | lockdep_assert_held(&prepare_lock); |
747 | |
748 | if (!core) |
749 | return 0; |
750 | |
751 | if (core->prepare_count == 0) { |
752 | ret = clk_pm_runtime_get(core); |
753 | if (ret) |
754 | return ret; |
755 | |
756 | ret = clk_core_prepare(core->parent); |
757 | if (ret) |
758 | goto runtime_put; |
759 | |
760 | trace_clk_prepare(core); |
761 | |
762 | if (core->ops->prepare) |
763 | ret = core->ops->prepare(core->hw); |
764 | |
765 | trace_clk_prepare_complete(core); |
766 | |
767 | if (ret) |
768 | goto unprepare; |
769 | } |
770 | |
771 | core->prepare_count++; |
772 | |
773 | /* |
774 | * CLK_SET_RATE_GATE is a special case of clock protection |
775 | * Instead of a consumer claiming exclusive rate control, it is |
776 | * actually the provider which prevents any consumer from making any |
777 | * operation which could result in a rate change or rate glitch while |
778 | * the clock is prepared. |
779 | */ |
780 | if (core->flags & CLK_SET_RATE_GATE) |
781 | clk_core_rate_protect(core); |
782 | |
783 | return 0; |
784 | unprepare: |
785 | clk_core_unprepare(core->parent); |
786 | runtime_put: |
787 | clk_pm_runtime_put(core); |
788 | return ret; |
789 | } |
790 | |
791 | static int clk_core_prepare_lock(struct clk_core *core) |
792 | { |
793 | int ret; |
794 | |
795 | clk_prepare_lock(); |
796 | ret = clk_core_prepare(core); |
797 | clk_prepare_unlock(); |
798 | |
799 | return ret; |
800 | } |
801 | |
802 | /** |
803 | * clk_prepare - prepare a clock source |
804 | * @clk: the clk being prepared |
805 | * |
806 | * clk_prepare may sleep, which differentiates it from clk_enable. In a simple |
807 | * case, clk_prepare can be used instead of clk_enable to ungate a clk if the |
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * For this reason, clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
813 | */ |
814 | int clk_prepare(struct clk *clk) |
815 | { |
816 | if (!clk) |
817 | return 0; |
818 | |
819 | return clk_core_prepare_lock(clk->core); |
820 | } |
821 | EXPORT_SYMBOL_GPL(clk_prepare); |
822 | |
823 | static void clk_core_disable(struct clk_core *core) |
824 | { |
825 | lockdep_assert_held(&enable_lock); |
826 | |
827 | if (!core) |
828 | return; |
829 | |
	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
831 | return; |
832 | |
833 | if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, |
		 "Disabling critical %s\n", core->name))
835 | return; |
836 | |
837 | if (--core->enable_count > 0) |
838 | return; |
839 | |
840 | trace_clk_disable_rcuidle(core); |
841 | |
842 | if (core->ops->disable) |
843 | core->ops->disable(core->hw); |
844 | |
845 | trace_clk_disable_complete_rcuidle(core); |
846 | |
847 | clk_core_disable(core->parent); |
848 | } |
849 | |
850 | static void clk_core_disable_lock(struct clk_core *core) |
851 | { |
852 | unsigned long flags; |
853 | |
854 | flags = clk_enable_lock(); |
855 | clk_core_disable(core); |
856 | clk_enable_unlock(flags); |
857 | } |
858 | |
859 | /** |
860 | * clk_disable - gate a clock |
861 | * @clk: the clk being gated |
862 | * |
863 | * clk_disable must not sleep, which differentiates it from clk_unprepare. In |
864 | * a simple case, clk_disable can be used instead of clk_unprepare to gate a |
865 | * clk if the operation is fast and will never sleep. One example is a |
866 | * SoC-internal clk which is controlled via simple register writes. In the |
 * complex case a clk gate operation may require a fast and a slow part. For
 * this reason, clk_unprepare and clk_disable are not mutually exclusive.
869 | * In fact clk_disable must be called before clk_unprepare. |
870 | */ |
871 | void clk_disable(struct clk *clk) |
872 | { |
873 | if (IS_ERR_OR_NULL(clk)) |
874 | return; |
875 | |
876 | clk_core_disable_lock(clk->core); |
877 | } |
878 | EXPORT_SYMBOL_GPL(clk_disable); |
879 | |
880 | static int clk_core_enable(struct clk_core *core) |
881 | { |
882 | int ret = 0; |
883 | |
884 | lockdep_assert_held(&enable_lock); |
885 | |
886 | if (!core) |
887 | return 0; |
888 | |
889 | if (WARN(core->prepare_count == 0, |
		 "Enabling unprepared %s\n", core->name))
891 | return -ESHUTDOWN; |
892 | |
893 | if (core->enable_count == 0) { |
894 | ret = clk_core_enable(core->parent); |
895 | |
896 | if (ret) |
897 | return ret; |
898 | |
899 | trace_clk_enable_rcuidle(core); |
900 | |
901 | if (core->ops->enable) |
902 | ret = core->ops->enable(core->hw); |
903 | |
904 | trace_clk_enable_complete_rcuidle(core); |
905 | |
906 | if (ret) { |
907 | clk_core_disable(core->parent); |
908 | return ret; |
909 | } |
910 | } |
911 | |
912 | core->enable_count++; |
913 | return 0; |
914 | } |
915 | |
916 | static int clk_core_enable_lock(struct clk_core *core) |
917 | { |
918 | unsigned long flags; |
919 | int ret; |
920 | |
921 | flags = clk_enable_lock(); |
922 | ret = clk_core_enable(core); |
923 | clk_enable_unlock(flags); |
924 | |
925 | return ret; |
926 | } |
927 | |
928 | /** |
929 | * clk_gate_restore_context - restore context for poweroff |
930 | * @hw: the clk_hw pointer of clock whose state is to be restored |
931 | * |
 * The clock gate restore context function enables or disables
 * the gate clock based on its enable_count. This is used in cases
 * where the clock context is lost, for example across a power-off,
 * and the hardware gate state needs to be brought back in line with
 * the software enable_count.
937 | */ |
938 | void clk_gate_restore_context(struct clk_hw *hw) |
939 | { |
940 | struct clk_core *core = hw->core; |
941 | |
942 | if (core->enable_count) |
943 | core->ops->enable(hw); |
944 | else |
945 | core->ops->disable(hw); |
946 | } |
947 | EXPORT_SYMBOL_GPL(clk_gate_restore_context); |
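
/*
 * Illustrative sketch: a gate-style provider whose enable bit lives in a
 * register that is lost across power-off can wire this helper directly
 * into its clk_ops. The other example_gate_* callbacks are hypothetical.
 *
 *	static const struct clk_ops example_gate_ops = {
 *		.enable		= example_gate_enable,
 *		.disable	= example_gate_disable,
 *		.is_enabled	= example_gate_is_enabled,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */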
948 | |
949 | static int clk_core_save_context(struct clk_core *core) |
950 | { |
951 | struct clk_core *child; |
952 | int ret = 0; |
953 | |
954 | hlist_for_each_entry(child, &core->children, child_node) { |
955 | ret = clk_core_save_context(child); |
956 | if (ret < 0) |
957 | return ret; |
958 | } |
959 | |
960 | if (core->ops && core->ops->save_context) |
961 | ret = core->ops->save_context(core->hw); |
962 | |
963 | return ret; |
964 | } |
965 | |
966 | static void clk_core_restore_context(struct clk_core *core) |
967 | { |
968 | struct clk_core *child; |
969 | |
970 | if (core->ops && core->ops->restore_context) |
971 | core->ops->restore_context(core->hw); |
972 | |
973 | hlist_for_each_entry(child, &core->children, child_node) |
974 | clk_core_restore_context(child); |
975 | } |
976 | |
977 | /** |
978 | * clk_save_context - save clock context for poweroff |
979 | * |
 * Saves the context of the clock registers for power states in which the
981 | * contents of the registers will be lost. Occurs deep within the suspend |
982 | * code. Returns 0 on success. |
983 | */ |
984 | int clk_save_context(void) |
985 | { |
986 | struct clk_core *clk; |
987 | int ret; |
988 | |
989 | hlist_for_each_entry(clk, &clk_root_list, child_node) { |
990 | ret = clk_core_save_context(clk); |
991 | if (ret < 0) |
992 | return ret; |
993 | } |
994 | |
995 | hlist_for_each_entry(clk, &clk_orphan_list, child_node) { |
996 | ret = clk_core_save_context(clk); |
997 | if (ret < 0) |
998 | return ret; |
999 | } |
1000 | |
1001 | return 0; |
1002 | } |
1003 | EXPORT_SYMBOL_GPL(clk_save_context); |
1004 | |
1005 | /** |
1006 | * clk_restore_context - restore clock context after poweroff |
1007 | * |
1008 | * Restore the saved clock context upon resume. |
1009 | * |
1010 | */ |
1011 | void clk_restore_context(void) |
1012 | { |
1013 | struct clk_core *core; |
1014 | |
1015 | hlist_for_each_entry(core, &clk_root_list, child_node) |
1016 | clk_core_restore_context(core); |
1017 | |
1018 | hlist_for_each_entry(core, &clk_orphan_list, child_node) |
1019 | clk_core_restore_context(core); |
1020 | } |
1021 | EXPORT_SYMBOL_GPL(clk_restore_context); |
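
/*
 * Illustrative sketch (platform code, not from this file): on SoCs where
 * the clock controller loses state in deep sleep, the platform suspend
 * path brackets the power-off with these two helpers. The function names
 * are placeholders.
 *
 *	static int example_platform_suspend(void)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void example_platform_resume(void)
 *	{
 *		clk_restore_context();
 *	}
 */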
1022 | |
1023 | /** |
1024 | * clk_enable - ungate a clock |
1025 | * @clk: the clk being ungated |
1026 | * |
1027 | * clk_enable must not sleep, which differentiates it from clk_prepare. In a |
1028 | * simple case, clk_enable can be used instead of clk_prepare to ungate a clk |
1029 | * if the operation will never sleep. One example is a SoC-internal clk which |
1030 | * is controlled via simple register writes. In the complex case a clk ungate |
 * operation may require a fast and a slow part. For this reason,
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative error
 * code otherwise.
1035 | */ |
1036 | int clk_enable(struct clk *clk) |
1037 | { |
1038 | if (!clk) |
1039 | return 0; |
1040 | |
1041 | return clk_core_enable_lock(clk->core); |
1042 | } |
1043 | EXPORT_SYMBOL_GPL(clk_enable); |
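
/*
 * Illustrative consumer-side sketch of the ordering rules spelled out in
 * the clk_prepare()/clk_enable() and clk_disable()/clk_unprepare()
 * comments. The clock handle is assumed to have been obtained with
 * clk_get() or devm_clk_get(); clk_prepare() and clk_unprepare() may
 * sleep, clk_enable() and clk_disable() must not.
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *
 *	... clock is running ...
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */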
1044 | |
1045 | static int clk_core_prepare_enable(struct clk_core *core) |
1046 | { |
1047 | int ret; |
1048 | |
1049 | ret = clk_core_prepare_lock(core); |
1050 | if (ret) |
1051 | return ret; |
1052 | |
1053 | ret = clk_core_enable_lock(core); |
1054 | if (ret) |
1055 | clk_core_unprepare_lock(core); |
1056 | |
1057 | return ret; |
1058 | } |
1059 | |
1060 | static void clk_core_disable_unprepare(struct clk_core *core) |
1061 | { |
1062 | clk_core_disable_lock(core); |
1063 | clk_core_unprepare_lock(core); |
1064 | } |
1065 | |
1066 | static void clk_unprepare_unused_subtree(struct clk_core *core) |
1067 | { |
1068 | struct clk_core *child; |
1069 | |
1070 | lockdep_assert_held(&prepare_lock); |
1071 | |
1072 | hlist_for_each_entry(child, &core->children, child_node) |
1073 | clk_unprepare_unused_subtree(child); |
1074 | |
1075 | if (core->prepare_count) |
1076 | return; |
1077 | |
1078 | if (core->flags & CLK_IGNORE_UNUSED) |
1079 | return; |
1080 | |
1081 | if (clk_pm_runtime_get(core)) |
1082 | return; |
1083 | |
1084 | if (clk_core_is_prepared(core)) { |
1085 | trace_clk_unprepare(core); |
1086 | if (core->ops->unprepare_unused) |
1087 | core->ops->unprepare_unused(core->hw); |
1088 | else if (core->ops->unprepare) |
1089 | core->ops->unprepare(core->hw); |
1090 | trace_clk_unprepare_complete(core); |
1091 | } |
1092 | |
1093 | clk_pm_runtime_put(core); |
1094 | } |
1095 | |
1096 | static void clk_disable_unused_subtree(struct clk_core *core) |
1097 | { |
1098 | struct clk_core *child; |
1099 | unsigned long flags; |
1100 | |
1101 | lockdep_assert_held(&prepare_lock); |
1102 | |
1103 | hlist_for_each_entry(child, &core->children, child_node) |
1104 | clk_disable_unused_subtree(child); |
1105 | |
1106 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1107 | clk_core_prepare_enable(core->parent); |
1108 | |
1109 | if (clk_pm_runtime_get(core)) |
1110 | goto unprepare_out; |
1111 | |
1112 | flags = clk_enable_lock(); |
1113 | |
1114 | if (core->enable_count) |
1115 | goto unlock_out; |
1116 | |
1117 | if (core->flags & CLK_IGNORE_UNUSED) |
1118 | goto unlock_out; |
1119 | |
1120 | /* |
1121 | * some gate clocks have special needs during the disable-unused |
1122 | * sequence. call .disable_unused if available, otherwise fall |
1123 | * back to .disable |
1124 | */ |
1125 | if (clk_core_is_enabled(core)) { |
1126 | trace_clk_disable(core); |
1127 | if (core->ops->disable_unused) |
1128 | core->ops->disable_unused(core->hw); |
1129 | else if (core->ops->disable) |
1130 | core->ops->disable(core->hw); |
1131 | trace_clk_disable_complete(core); |
1132 | } |
1133 | |
1134 | unlock_out: |
1135 | clk_enable_unlock(flags); |
1136 | clk_pm_runtime_put(core); |
1137 | unprepare_out: |
1138 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1139 | clk_core_disable_unprepare(core->parent); |
1140 | } |
1141 | |
1142 | static bool clk_ignore_unused; |
1143 | static int __init clk_ignore_unused_setup(char *__unused) |
1144 | { |
1145 | clk_ignore_unused = true; |
1146 | return 1; |
1147 | } |
__setup("clk_ignore_unused", clk_ignore_unused_setup);
1149 | |
1150 | static int clk_disable_unused(void) |
1151 | { |
1152 | struct clk_core *core; |
1153 | |
1154 | if (clk_ignore_unused) { |
		pr_warn("clk: Not disabling unused clocks\n");
1156 | return 0; |
1157 | } |
1158 | |
1159 | clk_prepare_lock(); |
1160 | |
1161 | hlist_for_each_entry(core, &clk_root_list, child_node) |
1162 | clk_disable_unused_subtree(core); |
1163 | |
1164 | hlist_for_each_entry(core, &clk_orphan_list, child_node) |
1165 | clk_disable_unused_subtree(core); |
1166 | |
1167 | hlist_for_each_entry(core, &clk_root_list, child_node) |
1168 | clk_unprepare_unused_subtree(core); |
1169 | |
1170 | hlist_for_each_entry(core, &clk_orphan_list, child_node) |
1171 | clk_unprepare_unused_subtree(core); |
1172 | |
1173 | clk_prepare_unlock(); |
1174 | |
1175 | return 0; |
1176 | } |
1177 | late_initcall_sync(clk_disable_unused); |
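
/*
 * Illustrative note: the scan above is skipped entirely when the kernel is
 * booted with "clk_ignore_unused" on the command line, e.g.
 *
 *	console=ttyS0,115200 root=/dev/mmcblk0p2 clk_ignore_unused
 *
 * (the other parameters are arbitrary examples).
 */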
1178 | |
1179 | static int clk_core_determine_round_nolock(struct clk_core *core, |
1180 | struct clk_rate_request *req) |
1181 | { |
1182 | long rate; |
1183 | |
1184 | lockdep_assert_held(&prepare_lock); |
1185 | |
1186 | if (!core) |
1187 | return 0; |
1188 | |
1189 | /* |
1190 | * At this point, core protection will be disabled if |
	 * - the provider is not protected at all
	 * - the calling consumer is the only one which has exclusivity
1193 | * over the provider |
1194 | */ |
1195 | if (clk_core_rate_is_protected(core)) { |
1196 | req->rate = core->rate; |
1197 | } else if (core->ops->determine_rate) { |
1198 | return core->ops->determine_rate(core->hw, req); |
1199 | } else if (core->ops->round_rate) { |
1200 | rate = core->ops->round_rate(core->hw, req->rate, |
1201 | &req->best_parent_rate); |
1202 | if (rate < 0) |
1203 | return rate; |
1204 | |
1205 | req->rate = rate; |
1206 | } else { |
1207 | return -EINVAL; |
1208 | } |
1209 | |
1210 | return 0; |
1211 | } |
1212 | |
1213 | static void clk_core_init_rate_req(struct clk_core * const core, |
1214 | struct clk_rate_request *req) |
1215 | { |
1216 | struct clk_core *parent; |
1217 | |
1218 | if (WARN_ON(!core || !req)) |
1219 | return; |
1220 | |
1221 | parent = core->parent; |
1222 | if (parent) { |
1223 | req->best_parent_hw = parent->hw; |
1224 | req->best_parent_rate = parent->rate; |
1225 | } else { |
1226 | req->best_parent_hw = NULL; |
1227 | req->best_parent_rate = 0; |
1228 | } |
1229 | } |
1230 | |
1231 | static bool clk_core_can_round(struct clk_core * const core) |
1232 | { |
1233 | if (core->ops->determine_rate || core->ops->round_rate) |
1234 | return true; |
1235 | |
1236 | return false; |
1237 | } |
1238 | |
1239 | static int clk_core_round_rate_nolock(struct clk_core *core, |
1240 | struct clk_rate_request *req) |
1241 | { |
1242 | lockdep_assert_held(&prepare_lock); |
1243 | |
1244 | if (!core) { |
1245 | req->rate = 0; |
1246 | return 0; |
1247 | } |
1248 | |
1249 | clk_core_init_rate_req(core, req); |
1250 | |
1251 | if (clk_core_can_round(core)) |
1252 | return clk_core_determine_round_nolock(core, req); |
1253 | else if (core->flags & CLK_SET_RATE_PARENT) |
1254 | return clk_core_round_rate_nolock(core->parent, req); |
1255 | |
1256 | req->rate = core->rate; |
1257 | return 0; |
1258 | } |
1259 | |
1260 | /** |
1261 | * __clk_determine_rate - get the closest rate actually supported by a clock |
1262 | * @hw: determine the rate of this clock |
1263 | * @req: target rate request |
1264 | * |
1265 | * Useful for clk_ops such as .set_rate and .determine_rate. |
1266 | */ |
1267 | int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) |
1268 | { |
1269 | if (!hw) { |
1270 | req->rate = 0; |
1271 | return 0; |
1272 | } |
1273 | |
1274 | return clk_core_round_rate_nolock(hw->core, req); |
1275 | } |
1276 | EXPORT_SYMBOL_GPL(__clk_determine_rate); |
1277 | |
1278 | unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) |
1279 | { |
1280 | int ret; |
1281 | struct clk_rate_request req; |
1282 | |
1283 | clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); |
1284 | req.rate = rate; |
1285 | |
1286 | ret = clk_core_round_rate_nolock(hw->core, &req); |
1287 | if (ret) |
1288 | return 0; |
1289 | |
1290 | return req.rate; |
1291 | } |
1292 | EXPORT_SYMBOL_GPL(clk_hw_round_rate); |
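
/*
 * Illustrative sketch: a provider's .determine_rate callback can use
 * clk_hw_round_rate() on its parent to see what the parent could deliver
 * before picking its own setting. Everything named example_* is made up,
 * and the fixed divide-by-two is only for illustration (a real callback
 * would also handle a missing parent).
 *
 *	static int example_div2_determine_rate(struct clk_hw *hw,
 *					       struct clk_rate_request *req)
 *	{
 *		unsigned long parent_rate;
 *
 *		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
 *						req->rate * 2);
 *		req->best_parent_rate = parent_rate;
 *		req->rate = parent_rate / 2;
 *
 *		return 0;
 *	}
 */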
1293 | |
1294 | /** |
1295 | * clk_round_rate - round the given rate for a clk |
1296 | * @clk: the clk for which we are rounding a rate |
1297 | * @rate: the rate which is to be rounded |
1298 | * |
1299 | * Takes in a rate as input and rounds it to a rate that the clk can actually |
 * use, which is then returned. If clk doesn't support the round_rate operation
1301 | * then the parent rate is returned. |
1302 | */ |
1303 | long clk_round_rate(struct clk *clk, unsigned long rate) |
1304 | { |
1305 | struct clk_rate_request req; |
1306 | int ret; |
1307 | |
1308 | if (!clk) |
1309 | return 0; |
1310 | |
1311 | clk_prepare_lock(); |
1312 | |
1313 | if (clk->exclusive_count) |
1314 | clk_core_rate_unprotect(clk->core); |
1315 | |
1316 | clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); |
1317 | req.rate = rate; |
1318 | |
1319 | ret = clk_core_round_rate_nolock(clk->core, &req); |
1320 | |
1321 | if (clk->exclusive_count) |
1322 | clk_core_rate_protect(clk->core); |
1323 | |
1324 | clk_prepare_unlock(); |
1325 | |
1326 | if (ret) |
1327 | return ret; |
1328 | |
1329 | return req.rate; |
1330 | } |
1331 | EXPORT_SYMBOL_GPL(clk_round_rate); |
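
/*
 * Illustrative consumer-side sketch: clk_round_rate() lets a driver check
 * what it would actually get before committing with clk_set_rate(). The
 * 19.2 MHz target is an arbitrary example value.
 *
 *	long rounded = clk_round_rate(clk, 19200000);
 *
 *	if (rounded < 0)
 *		return rounded;
 *
 *	ret = clk_set_rate(clk, rounded);
 */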
1332 | |
1333 | /** |
1334 | * __clk_notify - call clk notifier chain |
1335 | * @core: clk that is changing rate |
1336 | * @msg: clk notifier type (see include/linux/clk.h) |
1337 | * @old_rate: old clk rate |
1338 | * @new_rate: new clk rate |
1339 | * |
1340 | * Triggers a notifier call chain on the clk rate-change notification |
1341 | * for 'clk'. Passes a pointer to the struct clk and the previous |
1342 | * and current rates to the notifier callback. Intended to be called by |
1343 | * internal clock code only. Returns NOTIFY_DONE from the last driver |
1344 | * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if |
1345 | * a driver returns that. |
1346 | */ |
1347 | static int __clk_notify(struct clk_core *core, unsigned long msg, |
1348 | unsigned long old_rate, unsigned long new_rate) |
1349 | { |
1350 | struct clk_notifier *cn; |
1351 | struct clk_notifier_data cnd; |
1352 | int ret = NOTIFY_DONE; |
1353 | |
1354 | cnd.old_rate = old_rate; |
1355 | cnd.new_rate = new_rate; |
1356 | |
1357 | list_for_each_entry(cn, &clk_notifier_list, node) { |
1358 | if (cn->clk->core == core) { |
1359 | cnd.clk = cn->clk; |
1360 | ret = srcu_notifier_call_chain(&cn->notifier_head, msg, |
1361 | &cnd); |
1362 | if (ret & NOTIFY_STOP_MASK) |
1363 | return ret; |
1364 | } |
1365 | } |
1366 | |
1367 | return ret; |
1368 | } |
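
/*
 * Illustrative sketch of the consumer side of this notification chain. A
 * driver registers a notifier_block with clk_notifier_register() (defined
 * later in this file); the callback and the 100 MHz limit below are
 * hypothetical.
 *
 *	static int example_rate_notifier(struct notifier_block *nb,
 *					 unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_rate_notifier,
 *	};
 */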
1369 | |
1370 | /** |
 * __clk_recalc_accuracies - recalculate accuracies for a whole subtree
1372 | * @core: first clk in the subtree |
1373 | * |
1374 | * Walks the subtree of clks starting with clk and recalculates accuracies as |
1375 | * it goes. Note that if a clk does not implement the .recalc_accuracy |
1376 | * callback then it is assumed that the clock will take on the accuracy of its |
1377 | * parent. |
1378 | */ |
1379 | static void __clk_recalc_accuracies(struct clk_core *core) |
1380 | { |
1381 | unsigned long parent_accuracy = 0; |
1382 | struct clk_core *child; |
1383 | |
1384 | lockdep_assert_held(&prepare_lock); |
1385 | |
1386 | if (core->parent) |
1387 | parent_accuracy = core->parent->accuracy; |
1388 | |
1389 | if (core->ops->recalc_accuracy) |
1390 | core->accuracy = core->ops->recalc_accuracy(core->hw, |
1391 | parent_accuracy); |
1392 | else |
1393 | core->accuracy = parent_accuracy; |
1394 | |
1395 | hlist_for_each_entry(child, &core->children, child_node) |
1396 | __clk_recalc_accuracies(child); |
1397 | } |
1398 | |
1399 | static long clk_core_get_accuracy(struct clk_core *core) |
1400 | { |
1401 | unsigned long accuracy; |
1402 | |
1403 | clk_prepare_lock(); |
1404 | if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) |
1405 | __clk_recalc_accuracies(core); |
1406 | |
1407 | accuracy = __clk_get_accuracy(core); |
1408 | clk_prepare_unlock(); |
1409 | |
1410 | return accuracy; |
1411 | } |
1412 | |
1413 | /** |
1414 | * clk_get_accuracy - return the accuracy of clk |
1415 | * @clk: the clk whose accuracy is being returned |
1416 | * |
1417 | * Simply returns the cached accuracy of the clk, unless |
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1419 | * issued. |
1420 | * If clk is NULL then returns 0. |
1421 | */ |
1422 | long clk_get_accuracy(struct clk *clk) |
1423 | { |
1424 | if (!clk) |
1425 | return 0; |
1426 | |
1427 | return clk_core_get_accuracy(clk->core); |
1428 | } |
1429 | EXPORT_SYMBOL_GPL(clk_get_accuracy); |
1430 | |
1431 | static unsigned long clk_recalc(struct clk_core *core, |
1432 | unsigned long parent_rate) |
1433 | { |
1434 | unsigned long rate = parent_rate; |
1435 | |
1436 | if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { |
1437 | rate = core->ops->recalc_rate(core->hw, parent_rate); |
1438 | clk_pm_runtime_put(core); |
1439 | } |
1440 | return rate; |
1441 | } |
1442 | |
1443 | /** |
 * __clk_recalc_rates - recalculate rates for a whole subtree
1445 | * @core: first clk in the subtree |
1446 | * @msg: notification type (see include/linux/clk.h) |
1447 | * |
1448 | * Walks the subtree of clks starting with clk and recalculates rates as it |
1449 | * goes. Note that if a clk does not implement the .recalc_rate callback then |
1450 | * it is assumed that the clock will take on the rate of its parent. |
1451 | * |
 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1453 | * if necessary. |
1454 | */ |
1455 | static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) |
1456 | { |
1457 | unsigned long old_rate; |
1458 | unsigned long parent_rate = 0; |
1459 | struct clk_core *child; |
1460 | |
1461 | lockdep_assert_held(&prepare_lock); |
1462 | |
1463 | old_rate = core->rate; |
1464 | |
1465 | if (core->parent) |
1466 | parent_rate = core->parent->rate; |
1467 | |
1468 | core->rate = clk_recalc(core, parent_rate); |
1469 | |
1470 | /* |
1471 | * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE |
1472 | * & ABORT_RATE_CHANGE notifiers |
1473 | */ |
1474 | if (core->notifier_count && msg) |
1475 | __clk_notify(core, msg, old_rate, core->rate); |
1476 | |
1477 | hlist_for_each_entry(child, &core->children, child_node) |
1478 | __clk_recalc_rates(child, msg); |
1479 | } |
1480 | |
1481 | static unsigned long clk_core_get_rate(struct clk_core *core) |
1482 | { |
1483 | unsigned long rate; |
1484 | |
1485 | clk_prepare_lock(); |
1486 | |
1487 | if (core && (core->flags & CLK_GET_RATE_NOCACHE)) |
1488 | __clk_recalc_rates(core, 0); |
1489 | |
1490 | rate = clk_core_get_rate_nolock(core); |
1491 | clk_prepare_unlock(); |
1492 | |
1493 | return rate; |
1494 | } |
1495 | |
1496 | /** |
1497 | * clk_get_rate - return the rate of clk |
1498 | * @clk: the clk whose rate is being returned |
1499 | * |
1500 | * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag |
1501 | * is set, which means a recalc_rate will be issued. |
1502 | * If clk is NULL then returns 0. |
1503 | */ |
1504 | unsigned long clk_get_rate(struct clk *clk) |
1505 | { |
1506 | if (!clk) |
1507 | return 0; |
1508 | |
1509 | return clk_core_get_rate(clk->core); |
1510 | } |
1511 | EXPORT_SYMBOL_GPL(clk_get_rate); |
1512 | |
1513 | static int clk_fetch_parent_index(struct clk_core *core, |
1514 | struct clk_core *parent) |
1515 | { |
1516 | int i; |
1517 | |
1518 | if (!parent) |
1519 | return -EINVAL; |
1520 | |
1521 | for (i = 0; i < core->num_parents; i++) { |
1522 | if (core->parents[i] == parent) |
1523 | return i; |
1524 | |
1525 | if (core->parents[i]) |
1526 | continue; |
1527 | |
1528 | /* Fallback to comparing globally unique names */ |
1529 | if (!strcmp(parent->name, core->parent_names[i])) { |
1530 | core->parents[i] = parent; |
1531 | return i; |
1532 | } |
1533 | } |
1534 | |
1535 | return -EINVAL; |
1536 | } |
1537 | |
1538 | /* |
1539 | * Update the orphan status of @core and all its children. |
1540 | */ |
1541 | static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) |
1542 | { |
1543 | struct clk_core *child; |
1544 | |
1545 | core->orphan = is_orphan; |
1546 | |
1547 | hlist_for_each_entry(child, &core->children, child_node) |
1548 | clk_core_update_orphan_status(child, is_orphan); |
1549 | } |
1550 | |
1551 | static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) |
1552 | { |
1553 | bool was_orphan = core->orphan; |
1554 | |
1555 | hlist_del(&core->child_node); |
1556 | |
1557 | if (new_parent) { |
1558 | bool becomes_orphan = new_parent->orphan; |
1559 | |
1560 | /* avoid duplicate POST_RATE_CHANGE notifications */ |
1561 | if (new_parent->new_child == core) |
1562 | new_parent->new_child = NULL; |
1563 | |
1564 | hlist_add_head(&core->child_node, &new_parent->children); |
1565 | |
1566 | if (was_orphan != becomes_orphan) |
1567 | clk_core_update_orphan_status(core, becomes_orphan); |
1568 | } else { |
1569 | hlist_add_head(&core->child_node, &clk_orphan_list); |
1570 | if (!was_orphan) |
1571 | clk_core_update_orphan_status(core, true); |
1572 | } |
1573 | |
1574 | core->parent = new_parent; |
1575 | } |
1576 | |
1577 | static struct clk_core *__clk_set_parent_before(struct clk_core *core, |
1578 | struct clk_core *parent) |
1579 | { |
1580 | unsigned long flags; |
1581 | struct clk_core *old_parent = core->parent; |
1582 | |
1583 | /* |
1584 | * 1. enable parents for CLK_OPS_PARENT_ENABLE clock |
1585 | * |
1586 | * 2. Migrate prepare state between parents and prevent race with |
1587 | * clk_enable(). |
1588 | * |
1589 | * If the clock is not prepared, then a race with |
1590 | * clk_enable/disable() is impossible since we already have the |
1591 | * prepare lock (future calls to clk_enable() need to be preceded by |
1592 | * a clk_prepare()). |
1593 | * |
1594 | * If the clock is prepared, migrate the prepared state to the new |
1595 | * parent and also protect against a race with clk_enable() by |
1596 | * forcing the clock and the new parent on. This ensures that all |
1597 | * future calls to clk_enable() are practically NOPs with respect to |
1598 | * hardware and software states. |
1599 | * |
1600 | * See also: Comment for clk_set_parent() below. |
1601 | */ |
1602 | |
1603 | /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ |
1604 | if (core->flags & CLK_OPS_PARENT_ENABLE) { |
1605 | clk_core_prepare_enable(old_parent); |
1606 | clk_core_prepare_enable(parent); |
1607 | } |
1608 | |
1609 | /* migrate prepare count if > 0 */ |
1610 | if (core->prepare_count) { |
1611 | clk_core_prepare_enable(parent); |
1612 | clk_core_enable_lock(core); |
1613 | } |
1614 | |
1615 | /* update the clk tree topology */ |
1616 | flags = clk_enable_lock(); |
1617 | clk_reparent(core, parent); |
1618 | clk_enable_unlock(flags); |
1619 | |
1620 | return old_parent; |
1621 | } |
1622 | |
1623 | static void __clk_set_parent_after(struct clk_core *core, |
1624 | struct clk_core *parent, |
1625 | struct clk_core *old_parent) |
1626 | { |
1627 | /* |
1628 | * Finish the migration of prepare state and undo the changes done |
1629 | * for preventing a race with clk_enable(). |
1630 | */ |
1631 | if (core->prepare_count) { |
1632 | clk_core_disable_lock(core); |
1633 | clk_core_disable_unprepare(old_parent); |
1634 | } |
1635 | |
1636 | /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ |
1637 | if (core->flags & CLK_OPS_PARENT_ENABLE) { |
1638 | clk_core_disable_unprepare(parent); |
1639 | clk_core_disable_unprepare(old_parent); |
1640 | } |
1641 | } |
1642 | |
1643 | static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, |
1644 | u8 p_index) |
1645 | { |
1646 | unsigned long flags; |
1647 | int ret = 0; |
1648 | struct clk_core *old_parent; |
1649 | |
1650 | old_parent = __clk_set_parent_before(core, parent); |
1651 | |
1652 | trace_clk_set_parent(core, parent); |
1653 | |
1654 | /* change clock input source */ |
1655 | if (parent && core->ops->set_parent) |
1656 | ret = core->ops->set_parent(core->hw, p_index); |
1657 | |
1658 | trace_clk_set_parent_complete(core, parent); |
1659 | |
1660 | if (ret) { |
1661 | flags = clk_enable_lock(); |
1662 | clk_reparent(core, old_parent); |
1663 | clk_enable_unlock(flags); |
1664 | __clk_set_parent_after(core, old_parent, parent); |
1665 | |
1666 | return ret; |
1667 | } |
1668 | |
1669 | __clk_set_parent_after(core, parent, old_parent); |
1670 | |
1671 | return 0; |
1672 | } |
1673 | |
1674 | /** |
 * __clk_speculate_rates - speculate rates for a whole subtree
1676 | * @core: first clk in the subtree |
1677 | * @parent_rate: the "future" rate of clk's parent |
1678 | * |
1679 | * Walks the subtree of clks starting with clk, speculating rates as it |
1680 | * goes and firing off PRE_RATE_CHANGE notifications as necessary. |
1681 | * |
 * Unlike __clk_recalc_rates, __clk_speculate_rates exists only for sending
1683 | * pre-rate change notifications and returns early if no clks in the |
1684 | * subtree have subscribed to the notifications. Note that if a clk does not |
1685 | * implement the .recalc_rate callback then it is assumed that the clock will |
1686 | * take on the rate of its parent. |
1687 | */ |
1688 | static int __clk_speculate_rates(struct clk_core *core, |
1689 | unsigned long parent_rate) |
1690 | { |
1691 | struct clk_core *child; |
1692 | unsigned long new_rate; |
1693 | int ret = NOTIFY_DONE; |
1694 | |
1695 | lockdep_assert_held(&prepare_lock); |
1696 | |
1697 | new_rate = clk_recalc(core, parent_rate); |
1698 | |
1699 | /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ |
1700 | if (core->notifier_count) |
1701 | ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); |
1702 | |
1703 | if (ret & NOTIFY_STOP_MASK) { |
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1705 | __func__, core->name, ret); |
1706 | goto out; |
1707 | } |
1708 | |
1709 | hlist_for_each_entry(child, &core->children, child_node) { |
1710 | ret = __clk_speculate_rates(child, new_rate); |
1711 | if (ret & NOTIFY_STOP_MASK) |
1712 | break; |
1713 | } |
1714 | |
1715 | out: |
1716 | return ret; |
1717 | } |
1718 | |
1719 | static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, |
1720 | struct clk_core *new_parent, u8 p_index) |
1721 | { |
1722 | struct clk_core *child; |
1723 | |
1724 | core->new_rate = new_rate; |
1725 | core->new_parent = new_parent; |
1726 | core->new_parent_index = p_index; |
1727 | /* include clk in new parent's PRE_RATE_CHANGE notifications */ |
1728 | core->new_child = NULL; |
1729 | if (new_parent && new_parent != core->parent) |
1730 | new_parent->new_child = core; |
1731 | |
1732 | hlist_for_each_entry(child, &core->children, child_node) { |
1733 | child->new_rate = clk_recalc(child, new_rate); |
1734 | clk_calc_subtree(child, child->new_rate, NULL, 0); |
1735 | } |
1736 | } |
1737 | |
1738 | /* |
1739 | * calculate the new rates returning the topmost clock that has to be |
1740 | * changed. |
1741 | */ |
1742 | static struct clk_core *clk_calc_new_rates(struct clk_core *core, |
1743 | unsigned long rate) |
1744 | { |
1745 | struct clk_core *top = core; |
1746 | struct clk_core *old_parent, *parent; |
1747 | unsigned long best_parent_rate = 0; |
1748 | unsigned long new_rate; |
1749 | unsigned long min_rate; |
1750 | unsigned long max_rate; |
1751 | int p_index = 0; |
1752 | long ret; |
1753 | |
1754 | /* sanity */ |
1755 | if (IS_ERR_OR_NULL(core)) |
1756 | return NULL; |
1757 | |
1758 | /* save parent rate, if it exists */ |
1759 | parent = old_parent = core->parent; |
1760 | if (parent) |
1761 | best_parent_rate = parent->rate; |
1762 | |
1763 | clk_core_get_boundaries(core, &min_rate, &max_rate); |
1764 | |
1765 | /* find the closest rate and parent clk/rate */ |
1766 | if (clk_core_can_round(core)) { |
1767 | struct clk_rate_request req; |
1768 | |
1769 | req.rate = rate; |
1770 | req.min_rate = min_rate; |
1771 | req.max_rate = max_rate; |
1772 | |
1773 | clk_core_init_rate_req(core, &req); |
1774 | |
1775 | ret = clk_core_determine_round_nolock(core, &req); |
1776 | if (ret < 0) |
1777 | return NULL; |
1778 | |
1779 | best_parent_rate = req.best_parent_rate; |
1780 | new_rate = req.rate; |
1781 | parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; |
1782 | |
1783 | if (new_rate < min_rate || new_rate > max_rate) |
1784 | return NULL; |
1785 | } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { |
1786 | /* pass-through clock without adjustable parent */ |
1787 | core->new_rate = core->rate; |
1788 | return NULL; |
1789 | } else { |
1790 | /* pass-through clock with adjustable parent */ |
1791 | top = clk_calc_new_rates(parent, rate); |
1792 | new_rate = parent->new_rate; |
1793 | goto out; |
1794 | } |
1795 | |
1796 | /* some clocks must be gated to change parent */ |
1797 | if (parent != old_parent && |
1798 | (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { |
		pr_debug("%s: %s not gated but wants to reparent\n",
1800 | __func__, core->name); |
1801 | return NULL; |
1802 | } |
1803 | |
1804 | /* try finding the new parent index */ |
1805 | if (parent && core->num_parents > 1) { |
1806 | p_index = clk_fetch_parent_index(core, parent); |
1807 | if (p_index < 0) { |
			pr_debug("%s: clk %s can not be parent of clk %s\n",
1809 | __func__, parent->name, core->name); |
1810 | return NULL; |
1811 | } |
1812 | } |
1813 | |
1814 | if ((core->flags & CLK_SET_RATE_PARENT) && parent && |
1815 | best_parent_rate != parent->rate) |
1816 | top = clk_calc_new_rates(parent, best_parent_rate); |
1817 | |
1818 | out: |
1819 | clk_calc_subtree(core, new_rate, parent, p_index); |
1820 | |
1821 | return top; |
1822 | } |
1823 | |
1824 | /* |
1825 | * Notify about rate changes in a subtree. Always walk down the whole tree |
1826 | * so that in case of an error we can walk down the whole tree again and |
1827 | * abort the change. |
1828 | */ |
1829 | static struct clk_core *clk_propagate_rate_change(struct clk_core *core, |
1830 | unsigned long event) |
1831 | { |
1832 | struct clk_core *child, *tmp_clk, *fail_clk = NULL; |
1833 | int ret = NOTIFY_DONE; |
1834 | |
1835 | if (core->rate == core->new_rate) |
1836 | return NULL; |
1837 | |
1838 | if (core->notifier_count) { |
1839 | ret = __clk_notify(core, event, core->rate, core->new_rate); |
1840 | if (ret & NOTIFY_STOP_MASK) |
1841 | fail_clk = core; |
1842 | } |
1843 | |
1844 | hlist_for_each_entry(child, &core->children, child_node) { |
1845 | /* Skip children who will be reparented to another clock */ |
1846 | if (child->new_parent && child->new_parent != core) |
1847 | continue; |
1848 | tmp_clk = clk_propagate_rate_change(child, event); |
1849 | if (tmp_clk) |
1850 | fail_clk = tmp_clk; |
1851 | } |
1852 | |
1853 | /* handle the new child who might not be in core->children yet */ |
1854 | if (core->new_child) { |
1855 | tmp_clk = clk_propagate_rate_change(core->new_child, event); |
1856 | if (tmp_clk) |
1857 | fail_clk = tmp_clk; |
1858 | } |
1859 | |
1860 | return fail_clk; |
1861 | } |
1862 | |
1863 | /* |
1864 | * walk down a subtree and set the new rates notifying the rate |
1865 | * change on the way |
1866 | */ |
1867 | static void clk_change_rate(struct clk_core *core) |
1868 | { |
1869 | struct clk_core *child; |
1870 | struct hlist_node *tmp; |
1871 | unsigned long old_rate; |
1872 | unsigned long best_parent_rate = 0; |
1873 | bool skip_set_rate = false; |
1874 | struct clk_core *old_parent; |
1875 | struct clk_core *parent = NULL; |
1876 | |
1877 | old_rate = core->rate; |
1878 | |
1879 | if (core->new_parent) { |
1880 | parent = core->new_parent; |
1881 | best_parent_rate = core->new_parent->rate; |
1882 | } else if (core->parent) { |
1883 | parent = core->parent; |
1884 | best_parent_rate = core->parent->rate; |
1885 | } |
1886 | |
1887 | if (clk_pm_runtime_get(core)) |
1888 | return; |
1889 | |
1890 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1891 | unsigned long flags; |
1892 | |
1893 | clk_core_prepare(core); |
1894 | flags = clk_enable_lock(); |
1895 | clk_core_enable(core); |
1896 | clk_enable_unlock(flags); |
1897 | } |
1898 | |
1899 | if (core->new_parent && core->new_parent != core->parent) { |
1900 | old_parent = __clk_set_parent_before(core, core->new_parent); |
1901 | trace_clk_set_parent(core, core->new_parent); |
1902 | |
1903 | if (core->ops->set_rate_and_parent) { |
1904 | skip_set_rate = true; |
1905 | core->ops->set_rate_and_parent(core->hw, core->new_rate, |
1906 | best_parent_rate, |
1907 | core->new_parent_index); |
1908 | } else if (core->ops->set_parent) { |
1909 | core->ops->set_parent(core->hw, core->new_parent_index); |
1910 | } |
1911 | |
1912 | trace_clk_set_parent_complete(core, core->new_parent); |
1913 | __clk_set_parent_after(core, core->new_parent, old_parent); |
1914 | } |
1915 | |
1916 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1917 | clk_core_prepare_enable(parent); |
1918 | |
1919 | trace_clk_set_rate(core, core->new_rate); |
1920 | |
1921 | if (!skip_set_rate && core->ops->set_rate) |
1922 | core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); |
1923 | |
1924 | trace_clk_set_rate_complete(core, core->new_rate); |
1925 | |
1926 | core->rate = clk_recalc(core, best_parent_rate); |
1927 | |
1928 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1929 | unsigned long flags; |
1930 | |
1931 | flags = clk_enable_lock(); |
1932 | clk_core_disable(core); |
1933 | clk_enable_unlock(flags); |
1934 | clk_core_unprepare(core); |
1935 | } |
1936 | |
1937 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1938 | clk_core_disable_unprepare(parent); |
1939 | |
1940 | if (core->notifier_count && old_rate != core->rate) |
1941 | __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); |
1942 | |
1943 | if (core->flags & CLK_RECALC_NEW_RATES) |
1944 | (void)clk_calc_new_rates(core, core->new_rate); |
1945 | |
1946 | /* |
1947 | * Use safe iteration, as change_rate can actually swap parents |
1948 | * for certain clock types. |
1949 | */ |
1950 | hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { |
1951 | /* Skip children who will be reparented to another clock */ |
1952 | if (child->new_parent && child->new_parent != core) |
1953 | continue; |
1954 | clk_change_rate(child); |
1955 | } |
1956 | |
1957 | /* handle the new child who might not be in core->children yet */ |
1958 | if (core->new_child) |
1959 | clk_change_rate(core->new_child); |
1960 | |
1961 | clk_pm_runtime_put(core); |
1962 | } |
1963 | |
1964 | static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, |
1965 | unsigned long req_rate) |
1966 | { |
1967 | int ret, cnt; |
1968 | struct clk_rate_request req; |
1969 | |
1970 | lockdep_assert_held(&prepare_lock); |
1971 | |
1972 | if (!core) |
1973 | return 0; |
1974 | |
1975 | /* simulate what the rate would be if it could be freely set */ |
1976 | cnt = clk_core_rate_nuke_protect(core); |
1977 | if (cnt < 0) |
1978 | return cnt; |
1979 | |
1980 | clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); |
1981 | req.rate = req_rate; |
1982 | |
1983 | ret = clk_core_round_rate_nolock(core, &req); |
1984 | |
1985 | /* restore the protection */ |
1986 | clk_core_rate_restore_protect(core, cnt); |
1987 | |
1988 | return ret ? 0 : req.rate; |
1989 | } |
1990 | |
1991 | static int clk_core_set_rate_nolock(struct clk_core *core, |
1992 | unsigned long req_rate) |
1993 | { |
1994 | struct clk_core *top, *fail_clk; |
1995 | unsigned long rate; |
1996 | int ret = 0; |
1997 | |
1998 | if (!core) |
1999 | return 0; |
2000 | |
2001 | rate = clk_core_req_round_rate_nolock(core, req_rate); |
2002 | |
2003 | /* bail early if nothing to do */ |
2004 | if (rate == clk_core_get_rate_nolock(core)) |
2005 | return 0; |
2006 | |
2007 | /* fail on a direct rate set of a protected provider */ |
2008 | if (clk_core_rate_is_protected(core)) |
2009 | return -EBUSY; |
2010 | |
2011 | /* calculate new rates and get the topmost changed clock */ |
2012 | top = clk_calc_new_rates(core, req_rate); |
2013 | if (!top) |
2014 | return -EINVAL; |
2015 | |
2016 | ret = clk_pm_runtime_get(core); |
2017 | if (ret) |
2018 | return ret; |
2019 | |
2020 | /* notify that we are about to change rates */ |
2021 | fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); |
2022 | if (fail_clk) { |
2023 | pr_debug("%s: failed to set %s rate\n" , __func__, |
2024 | fail_clk->name); |
2025 | clk_propagate_rate_change(top, ABORT_RATE_CHANGE); |
2026 | ret = -EBUSY; |
2027 | goto err; |
2028 | } |
2029 | |
2030 | /* change the rates */ |
2031 | clk_change_rate(top); |
2032 | |
2033 | core->req_rate = req_rate; |
2034 | err: |
2035 | clk_pm_runtime_put(core); |
2036 | |
2037 | return ret; |
2038 | } |
2039 | |
2040 | /** |
2041 | * clk_set_rate - specify a new rate for clk |
2042 | * @clk: the clk whose rate is being changed |
2043 | * @rate: the new rate for clk |
2044 | * |
2045 | * In the simplest case clk_set_rate will only adjust the rate of clk. |
2046 | * |
2047 | * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to |
2048 | * propagate up to clk's parent; whether or not this happens depends on the |
2049 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged |
2050 | * after calling .round_rate then upstream parent propagation is ignored. If |
2051 | * *parent_rate comes back with a new rate for clk's parent then we propagate |
2052 | * up to clk's parent and set its rate. Upward propagation will continue |
2053 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or |
2054 | * .round_rate stops requesting changes to clk's parent_rate. |
2055 | * |
2056 | * Rate changes are accomplished via tree traversal that also recalculates the |
2057 | * rates for the clocks and fires off POST_RATE_CHANGE notifiers. |
2058 | * |
* Returns 0 on success, a negative errno otherwise.
2060 | */ |
2061 | int clk_set_rate(struct clk *clk, unsigned long rate) |
2062 | { |
2063 | int ret; |
2064 | |
2065 | if (!clk) |
2066 | return 0; |
2067 | |
2068 | /* prevent racing with updates to the clock topology */ |
2069 | clk_prepare_lock(); |
2070 | |
2071 | if (clk->exclusive_count) |
2072 | clk_core_rate_unprotect(clk->core); |
2073 | |
2074 | ret = clk_core_set_rate_nolock(clk->core, rate); |
2075 | |
2076 | if (clk->exclusive_count) |
2077 | clk_core_rate_protect(clk->core); |
2078 | |
2079 | clk_prepare_unlock(); |
2080 | |
2081 | return ret; |
2082 | } |
2083 | EXPORT_SYMBOL_GPL(clk_set_rate); |
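
/*
 * Example (consumer-side sketch; the "baud" con_id and the 48 MHz target
 * are hypothetical):
 *
 *	struct clk *clk = devm_clk_get(dev, "baud");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_set_rate(clk, 48000000);
 *	if (ret)
 *		dev_err(dev, "failed to set baud clock rate: %d\n", ret);
 */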
2084 | |
2085 | /** |
* clk_set_rate_exclusive - specify a new rate and get exclusive control
2087 | * @clk: the clk whose rate is being changed |
2088 | * @rate: the new rate for clk |
2089 | * |
2090 | * This is a combination of clk_set_rate() and clk_rate_exclusive_get() |
* within a critical section.
2092 | * |
2093 | * This can be used initially to ensure that at least 1 consumer is |
* satisfied when several consumers are competing for exclusivity over the
2095 | * same clock provider. |
2096 | * |
2097 | * The exclusivity is not applied if setting the rate failed. |
2098 | * |
2099 | * Calls to clk_rate_exclusive_get() should be balanced with calls to |
2100 | * clk_rate_exclusive_put(). |
2101 | * |
* Returns 0 on success, a negative errno otherwise.
2103 | */ |
2104 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) |
2105 | { |
2106 | int ret; |
2107 | |
2108 | if (!clk) |
2109 | return 0; |
2110 | |
2111 | /* prevent racing with updates to the clock topology */ |
2112 | clk_prepare_lock(); |
2113 | |
2114 | /* |
* The temporary protection removal is intentionally omitted here.
* This function is meant to be used instead of clk_rate_protect,
* i.e. the rate is set before the consumer code path protects the
* clock provider.
2118 | */ |
2119 | |
2120 | ret = clk_core_set_rate_nolock(clk->core, rate); |
2121 | if (!ret) { |
2122 | clk_core_rate_protect(clk->core); |
2123 | clk->exclusive_count++; |
2124 | } |
2125 | |
2126 | clk_prepare_unlock(); |
2127 | |
2128 | return ret; |
2129 | } |
2130 | EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); |
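
/*
 * Example (sketch; assumes a consumer clk obtained elsewhere): a successful
 * call must eventually be balanced with clk_rate_exclusive_put() so other
 * consumers can change the rate again.
 *
 *	ret = clk_set_rate_exclusive(clk, 100000000);
 *	if (ret)
 *		return ret;
 *
 *	... use the clock at the locked rate ...
 *
 *	clk_rate_exclusive_put(clk);
 */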
2131 | |
2132 | /** |
2133 | * clk_set_rate_range - set a rate range for a clock source |
2134 | * @clk: clock source |
2135 | * @min: desired minimum clock rate in Hz, inclusive |
2136 | * @max: desired maximum clock rate in Hz, inclusive |
2137 | * |
2138 | * Returns success (0) or negative errno. |
2139 | */ |
2140 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) |
2141 | { |
2142 | int ret = 0; |
2143 | unsigned long old_min, old_max, rate; |
2144 | |
2145 | if (!clk) |
2146 | return 0; |
2147 | |
2148 | if (min > max) { |
2149 | pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n" , |
2150 | __func__, clk->core->name, clk->dev_id, clk->con_id, |
2151 | min, max); |
2152 | return -EINVAL; |
2153 | } |
2154 | |
2155 | clk_prepare_lock(); |
2156 | |
2157 | if (clk->exclusive_count) |
2158 | clk_core_rate_unprotect(clk->core); |
2159 | |
2160 | /* Save the current values in case we need to rollback the change */ |
2161 | old_min = clk->min_rate; |
2162 | old_max = clk->max_rate; |
2163 | clk->min_rate = min; |
2164 | clk->max_rate = max; |
2165 | |
2166 | rate = clk_core_get_rate_nolock(clk->core); |
2167 | if (rate < min || rate > max) { |
2168 | /* |
2169 | * FIXME: |
* We are in a bit of trouble here, the current rate is outside
* the requested range. We are going to try to request an appropriate
2172 | * range boundary but there is a catch. It may fail for the |
2173 | * usual reason (clock broken, clock protected, etc) but also |
2174 | * because: |
2175 | * - round_rate() was not favorable and fell on the wrong |
2176 | * side of the boundary |
2177 | * - the determine_rate() callback does not really check for |
2178 | * this corner case when determining the rate |
2179 | */ |
2180 | |
2181 | if (rate < min) |
2182 | rate = min; |
2183 | else |
2184 | rate = max; |
2185 | |
2186 | ret = clk_core_set_rate_nolock(clk->core, rate); |
2187 | if (ret) { |
2188 | /* rollback the changes */ |
2189 | clk->min_rate = old_min; |
2190 | clk->max_rate = old_max; |
2191 | } |
2192 | } |
2193 | |
2194 | if (clk->exclusive_count) |
2195 | clk_core_rate_protect(clk->core); |
2196 | |
2197 | clk_prepare_unlock(); |
2198 | |
2199 | return ret; |
2200 | } |
2201 | EXPORT_SYMBOL_GPL(clk_set_rate_range); |
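
/*
 * Example (sketch; the 100-400 MHz window is hypothetical): constrain a
 * consumer clock, then drop the constraint again when it is no longer
 * needed.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 400000000);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	clk_set_rate_range(clk, 0, ULONG_MAX);
 */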
2202 | |
2203 | /** |
2204 | * clk_set_min_rate - set a minimum clock rate for a clock source |
2205 | * @clk: clock source |
2206 | * @rate: desired minimum clock rate in Hz, inclusive |
2207 | * |
2208 | * Returns success (0) or negative errno. |
2209 | */ |
2210 | int clk_set_min_rate(struct clk *clk, unsigned long rate) |
2211 | { |
2212 | if (!clk) |
2213 | return 0; |
2214 | |
2215 | return clk_set_rate_range(clk, rate, clk->max_rate); |
2216 | } |
2217 | EXPORT_SYMBOL_GPL(clk_set_min_rate); |
2218 | |
2219 | /** |
2220 | * clk_set_max_rate - set a maximum clock rate for a clock source |
2221 | * @clk: clock source |
2222 | * @rate: desired maximum clock rate in Hz, inclusive |
2223 | * |
2224 | * Returns success (0) or negative errno. |
2225 | */ |
2226 | int clk_set_max_rate(struct clk *clk, unsigned long rate) |
2227 | { |
2228 | if (!clk) |
2229 | return 0; |
2230 | |
2231 | return clk_set_rate_range(clk, clk->min_rate, rate); |
2232 | } |
2233 | EXPORT_SYMBOL_GPL(clk_set_max_rate); |
2234 | |
2235 | /** |
2236 | * clk_get_parent - return the parent of a clk |
2237 | * @clk: the clk whose parent gets returned |
2238 | * |
2239 | * Simply returns clk->parent. Returns NULL if clk is NULL. |
2240 | */ |
2241 | struct clk *clk_get_parent(struct clk *clk) |
2242 | { |
2243 | struct clk *parent; |
2244 | |
2245 | if (!clk) |
2246 | return NULL; |
2247 | |
2248 | clk_prepare_lock(); |
2249 | /* TODO: Create a per-user clk and change callers to call clk_put */ |
2250 | parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; |
2251 | clk_prepare_unlock(); |
2252 | |
2253 | return parent; |
2254 | } |
2255 | EXPORT_SYMBOL_GPL(clk_get_parent); |
2256 | |
2257 | static struct clk_core *__clk_init_parent(struct clk_core *core) |
2258 | { |
2259 | u8 index = 0; |
2260 | |
2261 | if (core->num_parents > 1 && core->ops->get_parent) |
2262 | index = core->ops->get_parent(core->hw); |
2263 | |
2264 | return clk_core_get_parent_by_index(core, index); |
2265 | } |
2266 | |
2267 | static void clk_core_reparent(struct clk_core *core, |
2268 | struct clk_core *new_parent) |
2269 | { |
2270 | clk_reparent(core, new_parent); |
2271 | __clk_recalc_accuracies(core); |
2272 | __clk_recalc_rates(core, POST_RATE_CHANGE); |
2273 | } |
2274 | |
2275 | void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) |
2276 | { |
2277 | if (!hw) |
2278 | return; |
2279 | |
2280 | clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); |
2281 | } |
2282 | |
2283 | /** |
2284 | * clk_has_parent - check if a clock is a possible parent for another |
2285 | * @clk: clock source |
2286 | * @parent: parent clock source |
2287 | * |
2288 | * This function can be used in drivers that need to check that a clock can be |
2289 | * the parent of another without actually changing the parent. |
2290 | * |
2291 | * Returns true if @parent is a possible parent for @clk, false otherwise. |
2292 | */ |
2293 | bool clk_has_parent(struct clk *clk, struct clk *parent) |
2294 | { |
2295 | struct clk_core *core, *parent_core; |
2296 | |
2297 | /* NULL clocks should be nops, so return success if either is NULL. */ |
2298 | if (!clk || !parent) |
2299 | return true; |
2300 | |
2301 | core = clk->core; |
2302 | parent_core = parent->core; |
2303 | |
2304 | /* Optimize for the case where the parent is already the parent. */ |
2305 | if (core->parent == parent_core) |
2306 | return true; |
2307 | |
2308 | return match_string(core->parent_names, core->num_parents, |
2309 | parent_core->name) >= 0; |
2310 | } |
2311 | EXPORT_SYMBOL_GPL(clk_has_parent); |
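
/*
 * Example (sketch; the "osc" con_id is hypothetical): only attempt the
 * reparent when the mux actually supports the candidate parent.
 *
 *	parent = devm_clk_get(dev, "osc");
 *	if (!IS_ERR(parent) && clk_has_parent(clk, parent))
 *		ret = clk_set_parent(clk, parent);
 */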
2312 | |
2313 | static int clk_core_set_parent_nolock(struct clk_core *core, |
2314 | struct clk_core *parent) |
2315 | { |
2316 | int ret = 0; |
2317 | int p_index = 0; |
2318 | unsigned long p_rate = 0; |
2319 | |
2320 | lockdep_assert_held(&prepare_lock); |
2321 | |
2322 | if (!core) |
2323 | return 0; |
2324 | |
2325 | if (core->parent == parent) |
2326 | return 0; |
2327 | |
/* verify ops for multi-parent clks */
2329 | if (core->num_parents > 1 && !core->ops->set_parent) |
2330 | return -EPERM; |
2331 | |
2332 | /* check that we are allowed to re-parent if the clock is in use */ |
2333 | if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) |
2334 | return -EBUSY; |
2335 | |
2336 | if (clk_core_rate_is_protected(core)) |
2337 | return -EBUSY; |
2338 | |
2339 | /* try finding the new parent index */ |
2340 | if (parent) { |
2341 | p_index = clk_fetch_parent_index(core, parent); |
2342 | if (p_index < 0) { |
2343 | pr_debug("%s: clk %s can not be parent of clk %s\n" , |
2344 | __func__, parent->name, core->name); |
2345 | return p_index; |
2346 | } |
2347 | p_rate = parent->rate; |
2348 | } |
2349 | |
2350 | ret = clk_pm_runtime_get(core); |
2351 | if (ret) |
2352 | return ret; |
2353 | |
2354 | /* propagate PRE_RATE_CHANGE notifications */ |
2355 | ret = __clk_speculate_rates(core, p_rate); |
2356 | |
2357 | /* abort if a driver objects */ |
2358 | if (ret & NOTIFY_STOP_MASK) |
2359 | goto runtime_put; |
2360 | |
2361 | /* do the re-parent */ |
2362 | ret = __clk_set_parent(core, parent, p_index); |
2363 | |
/* propagate rate and accuracy recalculation accordingly */
2365 | if (ret) { |
2366 | __clk_recalc_rates(core, ABORT_RATE_CHANGE); |
2367 | } else { |
2368 | __clk_recalc_rates(core, POST_RATE_CHANGE); |
2369 | __clk_recalc_accuracies(core); |
2370 | } |
2371 | |
2372 | runtime_put: |
2373 | clk_pm_runtime_put(core); |
2374 | |
2375 | return ret; |
2376 | } |
2377 | |
2378 | /** |
2379 | * clk_set_parent - switch the parent of a mux clk |
2380 | * @clk: the mux clk whose input we are switching |
2381 | * @parent: the new input to clk |
2382 | * |
2383 | * Re-parent clk to use parent as its new input source. If clk is in |
2384 | * prepared state, the clk will get enabled for the duration of this call. If |
* that's not acceptable for a specific clk (e.g. the consumer can't handle
* that, the reparenting is glitchy in hardware, etc.), use the
2387 | * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. |
2388 | * |
2389 | * After successfully changing clk's parent clk_set_parent will update the |
2390 | * clk topology, sysfs topology and propagate rate recalculation via |
2391 | * __clk_recalc_rates. |
2392 | * |
* Returns 0 on success, a negative errno otherwise.
2394 | */ |
2395 | int clk_set_parent(struct clk *clk, struct clk *parent) |
2396 | { |
2397 | int ret; |
2398 | |
2399 | if (!clk) |
2400 | return 0; |
2401 | |
2402 | clk_prepare_lock(); |
2403 | |
2404 | if (clk->exclusive_count) |
2405 | clk_core_rate_unprotect(clk->core); |
2406 | |
2407 | ret = clk_core_set_parent_nolock(clk->core, |
2408 | parent ? parent->core : NULL); |
2409 | |
2410 | if (clk->exclusive_count) |
2411 | clk_core_rate_protect(clk->core); |
2412 | |
2413 | clk_prepare_unlock(); |
2414 | |
2415 | return ret; |
2416 | } |
2417 | EXPORT_SYMBOL_GPL(clk_set_parent); |
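
/*
 * Example (sketch; clk and pll are hypothetical consumer handles): switch a
 * mux over to its PLL input and read back the resulting rate.
 *
 *	ret = clk_set_parent(clk, pll);
 *	if (ret)
 *		return ret;
 *
 *	rate = clk_get_rate(clk);
 */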
2418 | |
2419 | static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) |
2420 | { |
2421 | int ret = -EINVAL; |
2422 | |
2423 | lockdep_assert_held(&prepare_lock); |
2424 | |
2425 | if (!core) |
2426 | return 0; |
2427 | |
2428 | if (clk_core_rate_is_protected(core)) |
2429 | return -EBUSY; |
2430 | |
2431 | trace_clk_set_phase(core, degrees); |
2432 | |
2433 | if (core->ops->set_phase) { |
2434 | ret = core->ops->set_phase(core->hw, degrees); |
2435 | if (!ret) |
2436 | core->phase = degrees; |
2437 | } |
2438 | |
2439 | trace_clk_set_phase_complete(core, degrees); |
2440 | |
2441 | return ret; |
2442 | } |
2443 | |
2444 | /** |
2445 | * clk_set_phase - adjust the phase shift of a clock signal |
2446 | * @clk: clock signal source |
2447 | * @degrees: number of degrees the signal is shifted |
2448 | * |
2449 | * Shifts the phase of a clock signal by the specified |
* degrees. Returns 0 on success, a negative errno otherwise.
2451 | * |
2452 | * This function makes no distinction about the input or reference |
* signal that we adjust the clock signal phase against. For example, for
* phase-locked-loop clock signal generators we may shift phase with
* respect to the feedback clock signal input, but for other cases the
2456 | * clock phase may be shifted with respect to some other, unspecified |
2457 | * signal. |
2458 | * |
2459 | * Additionally the concept of phase shift does not propagate through |
2460 | * the clock tree hierarchy, which sets it apart from clock rates and |
2461 | * clock accuracy. A parent clock phase attribute does not have an |
2462 | * impact on the phase attribute of a child clock. |
2463 | */ |
2464 | int clk_set_phase(struct clk *clk, int degrees) |
2465 | { |
2466 | int ret; |
2467 | |
2468 | if (!clk) |
2469 | return 0; |
2470 | |
2471 | /* sanity check degrees */ |
2472 | degrees %= 360; |
2473 | if (degrees < 0) |
2474 | degrees += 360; |
2475 | |
2476 | clk_prepare_lock(); |
2477 | |
2478 | if (clk->exclusive_count) |
2479 | clk_core_rate_unprotect(clk->core); |
2480 | |
2481 | ret = clk_core_set_phase_nolock(clk->core, degrees); |
2482 | |
2483 | if (clk->exclusive_count) |
2484 | clk_core_rate_protect(clk->core); |
2485 | |
2486 | clk_prepare_unlock(); |
2487 | |
2488 | return ret; |
2489 | } |
2490 | EXPORT_SYMBOL_GPL(clk_set_phase); |
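
/*
 * Example (sketch; the 90 degree shift is hypothetical): request a quarter
 * period shift and check what the provider reports back.
 *
 *	ret = clk_set_phase(clk, 90);
 *	if (!ret && clk_get_phase(clk) != 90)
 *		dev_warn(dev, "phase not applied exactly\n");
 */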
2491 | |
2492 | static int clk_core_get_phase(struct clk_core *core) |
2493 | { |
2494 | int ret; |
2495 | |
2496 | clk_prepare_lock(); |
2497 | /* Always try to update cached phase if possible */ |
2498 | if (core->ops->get_phase) |
2499 | core->phase = core->ops->get_phase(core->hw); |
2500 | ret = core->phase; |
2501 | clk_prepare_unlock(); |
2502 | |
2503 | return ret; |
2504 | } |
2505 | |
2506 | /** |
2507 | * clk_get_phase - return the phase shift of a clock signal |
2508 | * @clk: clock signal source |
2509 | * |
2510 | * Returns the phase shift of a clock node in degrees, otherwise returns |
* a negative errno.
2512 | */ |
2513 | int clk_get_phase(struct clk *clk) |
2514 | { |
2515 | if (!clk) |
2516 | return 0; |
2517 | |
2518 | return clk_core_get_phase(clk->core); |
2519 | } |
2520 | EXPORT_SYMBOL_GPL(clk_get_phase); |
2521 | |
2522 | static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) |
2523 | { |
2524 | /* Assume a default value of 50% */ |
2525 | core->duty.num = 1; |
2526 | core->duty.den = 2; |
2527 | } |
2528 | |
2529 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); |
2530 | |
2531 | static int clk_core_update_duty_cycle_nolock(struct clk_core *core) |
2532 | { |
2533 | struct clk_duty *duty = &core->duty; |
2534 | int ret = 0; |
2535 | |
2536 | if (!core->ops->get_duty_cycle) |
2537 | return clk_core_update_duty_cycle_parent_nolock(core); |
2538 | |
2539 | ret = core->ops->get_duty_cycle(core->hw, duty); |
2540 | if (ret) |
2541 | goto reset; |
2542 | |
2543 | /* Don't trust the clock provider too much */ |
2544 | if (duty->den == 0 || duty->num > duty->den) { |
2545 | ret = -EINVAL; |
2546 | goto reset; |
2547 | } |
2548 | |
2549 | return 0; |
2550 | |
2551 | reset: |
2552 | clk_core_reset_duty_cycle_nolock(core); |
2553 | return ret; |
2554 | } |
2555 | |
2556 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) |
2557 | { |
2558 | int ret = 0; |
2559 | |
2560 | if (core->parent && |
2561 | core->flags & CLK_DUTY_CYCLE_PARENT) { |
2562 | ret = clk_core_update_duty_cycle_nolock(core->parent); |
2563 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); |
2564 | } else { |
2565 | clk_core_reset_duty_cycle_nolock(core); |
2566 | } |
2567 | |
2568 | return ret; |
2569 | } |
2570 | |
2571 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, |
2572 | struct clk_duty *duty); |
2573 | |
2574 | static int clk_core_set_duty_cycle_nolock(struct clk_core *core, |
2575 | struct clk_duty *duty) |
2576 | { |
2577 | int ret; |
2578 | |
2579 | lockdep_assert_held(&prepare_lock); |
2580 | |
2581 | if (clk_core_rate_is_protected(core)) |
2582 | return -EBUSY; |
2583 | |
2584 | trace_clk_set_duty_cycle(core, duty); |
2585 | |
2586 | if (!core->ops->set_duty_cycle) |
2587 | return clk_core_set_duty_cycle_parent_nolock(core, duty); |
2588 | |
2589 | ret = core->ops->set_duty_cycle(core->hw, duty); |
2590 | if (!ret) |
2591 | memcpy(&core->duty, duty, sizeof(*duty)); |
2592 | |
2593 | trace_clk_set_duty_cycle_complete(core, duty); |
2594 | |
2595 | return ret; |
2596 | } |
2597 | |
2598 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, |
2599 | struct clk_duty *duty) |
2600 | { |
2601 | int ret = 0; |
2602 | |
2603 | if (core->parent && |
2604 | core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { |
2605 | ret = clk_core_set_duty_cycle_nolock(core->parent, duty); |
2606 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); |
2607 | } |
2608 | |
2609 | return ret; |
2610 | } |
2611 | |
2612 | /** |
2613 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal |
2614 | * @clk: clock signal source |
2615 | * @num: numerator of the duty cycle ratio to be applied |
2616 | * @den: denominator of the duty cycle ratio to be applied |
2617 | * |
2618 | * Apply the duty cycle ratio if the ratio is valid and the clock can |
* perform this operation.
2620 | * |
2621 | * Returns (0) on success, a negative errno otherwise. |
2622 | */ |
2623 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) |
2624 | { |
2625 | int ret; |
2626 | struct clk_duty duty; |
2627 | |
2628 | if (!clk) |
2629 | return 0; |
2630 | |
2631 | /* sanity check the ratio */ |
2632 | if (den == 0 || num > den) |
2633 | return -EINVAL; |
2634 | |
2635 | duty.num = num; |
2636 | duty.den = den; |
2637 | |
2638 | clk_prepare_lock(); |
2639 | |
2640 | if (clk->exclusive_count) |
2641 | clk_core_rate_unprotect(clk->core); |
2642 | |
2643 | ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); |
2644 | |
2645 | if (clk->exclusive_count) |
2646 | clk_core_rate_protect(clk->core); |
2647 | |
2648 | clk_prepare_unlock(); |
2649 | |
2650 | return ret; |
2651 | } |
2652 | EXPORT_SYMBOL_GPL(clk_set_duty_cycle); |
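
/*
 * Example (sketch): request a 1/4 ratio, i.e. the signal is high for a
 * quarter of each period.
 *
 *	ret = clk_set_duty_cycle(clk, 1, 4);
 */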
2653 | |
2654 | static int clk_core_get_scaled_duty_cycle(struct clk_core *core, |
2655 | unsigned int scale) |
2656 | { |
2657 | struct clk_duty *duty = &core->duty; |
2658 | int ret; |
2659 | |
2660 | clk_prepare_lock(); |
2661 | |
2662 | ret = clk_core_update_duty_cycle_nolock(core); |
2663 | if (!ret) |
2664 | ret = mult_frac(scale, duty->num, duty->den); |
2665 | |
2666 | clk_prepare_unlock(); |
2667 | |
2668 | return ret; |
2669 | } |
2670 | |
2671 | /** |
2672 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal |
2673 | * @clk: clock signal source |
2674 | * @scale: scaling factor to be applied to represent the ratio as an integer |
2675 | * |
2676 | * Returns the duty cycle ratio of a clock node multiplied by the provided |
2677 | * scaling factor, or negative errno on error. |
2678 | */ |
2679 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) |
2680 | { |
2681 | if (!clk) |
2682 | return 0; |
2683 | |
2684 | return clk_core_get_scaled_duty_cycle(clk->core, scale); |
2685 | } |
2686 | EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); |
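
/*
 * Example (sketch): with scale = 100 the result is a percentage, so a 1/2
 * ratio reads back as 50.
 *
 *	int pct = clk_get_scaled_duty_cycle(clk, 100);
 */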
2687 | |
2688 | /** |
2689 | * clk_is_match - check if two clk's point to the same hardware clock |
2690 | * @p: clk compared against q |
2691 | * @q: clk compared against p |
2692 | * |
2693 | * Returns true if the two struct clk pointers both point to the same hardware |
2694 | * clock node. Put differently, returns true if struct clk *p and struct clk *q |
2695 | * share the same struct clk_core object. |
2696 | * |
2697 | * Returns false otherwise. Note that two NULL clks are treated as matching. |
2698 | */ |
2699 | bool clk_is_match(const struct clk *p, const struct clk *q) |
2700 | { |
2701 | /* trivial case: identical struct clk's or both NULL */ |
2702 | if (p == q) |
2703 | return true; |
2704 | |
2705 | /* true if clk->core pointers match. Avoid dereferencing garbage */ |
2706 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) |
2707 | if (p->core == q->core) |
2708 | return true; |
2709 | |
2710 | return false; |
2711 | } |
2712 | EXPORT_SYMBOL_GPL(clk_is_match); |
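
/*
 * Example (sketch; the "core" and "bus" con_ids are hypothetical): two
 * struct clk handles obtained separately may still refer to the same
 * hardware clock.
 *
 *	a = devm_clk_get(dev, "core");
 *	b = devm_clk_get(dev, "bus");
 *	if (clk_is_match(a, b))
 *		dev_dbg(dev, "core and bus share one clock\n");
 */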
2713 | |
2714 | /*** debugfs support ***/ |
2715 | |
2716 | #ifdef CONFIG_DEBUG_FS |
2717 | #include <linux/debugfs.h> |
2718 | |
2719 | static struct dentry *rootdir; |
2720 | static int inited = 0; |
2721 | static DEFINE_MUTEX(clk_debug_lock); |
2722 | static HLIST_HEAD(clk_debug_list); |
2723 | |
2724 | static struct hlist_head *all_lists[] = { |
2725 | &clk_root_list, |
2726 | &clk_orphan_list, |
2727 | NULL, |
2728 | }; |
2729 | |
2730 | static struct hlist_head *orphan_list[] = { |
2731 | &clk_orphan_list, |
2732 | NULL, |
2733 | }; |
2734 | |
2735 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, |
2736 | int level) |
2737 | { |
2738 | if (!c) |
2739 | return; |
2740 | |
2741 | seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n" , |
2742 | level * 3 + 1, "" , |
2743 | 30 - level * 3, c->name, |
2744 | c->enable_count, c->prepare_count, c->protect_count, |
2745 | clk_core_get_rate(c), clk_core_get_accuracy(c), |
2746 | clk_core_get_phase(c), |
2747 | clk_core_get_scaled_duty_cycle(c, 100000)); |
2748 | } |
2749 | |
2750 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, |
2751 | int level) |
2752 | { |
2753 | struct clk_core *child; |
2754 | |
2755 | if (!c) |
2756 | return; |
2757 | |
2758 | clk_summary_show_one(s, c, level); |
2759 | |
2760 | hlist_for_each_entry(child, &c->children, child_node) |
2761 | clk_summary_show_subtree(s, child, level + 1); |
2762 | } |
2763 | |
2764 | static int clk_summary_show(struct seq_file *s, void *data) |
2765 | { |
2766 | struct clk_core *c; |
2767 | struct hlist_head **lists = (struct hlist_head **)s->private; |
2768 | |
2769 | seq_puts(s, " enable prepare protect duty\n" ); |
2770 | seq_puts(s, " clock count count count rate accuracy phase cycle\n" ); |
2771 | seq_puts(s, "---------------------------------------------------------------------------------------------\n" ); |
2772 | |
2773 | clk_prepare_lock(); |
2774 | |
2775 | for (; *lists; lists++) |
2776 | hlist_for_each_entry(c, *lists, child_node) |
2777 | clk_summary_show_subtree(s, c, 0); |
2778 | |
2779 | clk_prepare_unlock(); |
2780 | |
2781 | return 0; |
2782 | } |
2783 | DEFINE_SHOW_ATTRIBUTE(clk_summary); |
2784 | |
2785 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) |
2786 | { |
2787 | if (!c) |
2788 | return; |
2789 | |
2790 | /* This should be JSON format, i.e. elements separated with a comma */ |
2791 | seq_printf(s, "\"%s\": { " , c->name); |
2792 | seq_printf(s, "\"enable_count\": %d," , c->enable_count); |
2793 | seq_printf(s, "\"prepare_count\": %d," , c->prepare_count); |
2794 | seq_printf(s, "\"protect_count\": %d," , c->protect_count); |
2795 | seq_printf(s, "\"rate\": %lu," , clk_core_get_rate(c)); |
2796 | seq_printf(s, "\"accuracy\": %lu," , clk_core_get_accuracy(c)); |
2797 | seq_printf(s, "\"phase\": %d," , clk_core_get_phase(c)); |
2798 | seq_printf(s, "\"duty_cycle\": %u" , |
2799 | clk_core_get_scaled_duty_cycle(c, 100000)); |
2800 | } |
2801 | |
2802 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) |
2803 | { |
2804 | struct clk_core *child; |
2805 | |
2806 | if (!c) |
2807 | return; |
2808 | |
2809 | clk_dump_one(s, c, level); |
2810 | |
2811 | hlist_for_each_entry(child, &c->children, child_node) { |
2812 | seq_putc(s, ','); |
2813 | clk_dump_subtree(s, child, level + 1); |
2814 | } |
2815 | |
2816 | seq_putc(s, '}'); |
2817 | } |
2818 | |
2819 | static int clk_dump_show(struct seq_file *s, void *data) |
2820 | { |
2821 | struct clk_core *c; |
2822 | bool first_node = true; |
2823 | struct hlist_head **lists = (struct hlist_head **)s->private; |
2824 | |
2825 | seq_putc(s, '{'); |
2826 | clk_prepare_lock(); |
2827 | |
2828 | for (; *lists; lists++) { |
2829 | hlist_for_each_entry(c, *lists, child_node) { |
2830 | if (!first_node) |
2831 | seq_putc(s, ','); |
2832 | first_node = false; |
2833 | clk_dump_subtree(s, c, 0); |
2834 | } |
2835 | } |
2836 | |
2837 | clk_prepare_unlock(); |
2838 | |
2839 | seq_puts(s, "}\n" ); |
2840 | return 0; |
2841 | } |
2842 | DEFINE_SHOW_ATTRIBUTE(clk_dump); |
2843 | |
2844 | static const struct { |
2845 | unsigned long flag; |
2846 | const char *name; |
2847 | } clk_flags[] = { |
2848 | #define ENTRY(f) { f, #f } |
2849 | ENTRY(CLK_SET_RATE_GATE), |
2850 | ENTRY(CLK_SET_PARENT_GATE), |
2851 | ENTRY(CLK_SET_RATE_PARENT), |
2852 | ENTRY(CLK_IGNORE_UNUSED), |
2853 | ENTRY(CLK_IS_BASIC), |
2854 | ENTRY(CLK_GET_RATE_NOCACHE), |
2855 | ENTRY(CLK_SET_RATE_NO_REPARENT), |
2856 | ENTRY(CLK_GET_ACCURACY_NOCACHE), |
2857 | ENTRY(CLK_RECALC_NEW_RATES), |
2858 | ENTRY(CLK_SET_RATE_UNGATE), |
2859 | ENTRY(CLK_IS_CRITICAL), |
2860 | ENTRY(CLK_OPS_PARENT_ENABLE), |
2861 | ENTRY(CLK_DUTY_CYCLE_PARENT), |
2862 | #undef ENTRY |
2863 | }; |
2864 | |
2865 | static int clk_flags_show(struct seq_file *s, void *data) |
2866 | { |
2867 | struct clk_core *core = s->private; |
2868 | unsigned long flags = core->flags; |
2869 | unsigned int i; |
2870 | |
2871 | for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { |
2872 | if (flags & clk_flags[i].flag) { |
2873 | seq_printf(s, "%s\n" , clk_flags[i].name); |
2874 | flags &= ~clk_flags[i].flag; |
2875 | } |
2876 | } |
2877 | if (flags) { |
2878 | /* Unknown flags */ |
2879 | seq_printf(s, "0x%lx\n" , flags); |
2880 | } |
2881 | |
2882 | return 0; |
2883 | } |
2884 | DEFINE_SHOW_ATTRIBUTE(clk_flags); |
2885 | |
2886 | static int possible_parents_show(struct seq_file *s, void *data) |
2887 | { |
2888 | struct clk_core *core = s->private; |
2889 | int i; |
2890 | |
2891 | for (i = 0; i < core->num_parents - 1; i++) |
2892 | seq_printf(s, "%s " , core->parent_names[i]); |
2893 | |
2894 | seq_printf(s, "%s\n" , core->parent_names[i]); |
2895 | |
2896 | return 0; |
2897 | } |
2898 | DEFINE_SHOW_ATTRIBUTE(possible_parents); |
2899 | |
2900 | static int clk_duty_cycle_show(struct seq_file *s, void *data) |
2901 | { |
2902 | struct clk_core *core = s->private; |
2903 | struct clk_duty *duty = &core->duty; |
2904 | |
2905 | seq_printf(s, "%u/%u\n" , duty->num, duty->den); |
2906 | |
2907 | return 0; |
2908 | } |
2909 | DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); |
2910 | |
2911 | static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) |
2912 | { |
2913 | struct dentry *root; |
2914 | |
2915 | if (!core || !pdentry) |
2916 | return; |
2917 | |
2918 | root = debugfs_create_dir(core->name, pdentry); |
2919 | core->dentry = root; |
2920 | |
2921 | debugfs_create_ulong("clk_rate" , 0444, root, &core->rate); |
2922 | debugfs_create_ulong("clk_accuracy" , 0444, root, &core->accuracy); |
2923 | debugfs_create_u32("clk_phase" , 0444, root, &core->phase); |
2924 | debugfs_create_file("clk_flags" , 0444, root, core, &clk_flags_fops); |
2925 | debugfs_create_u32("clk_prepare_count" , 0444, root, &core->prepare_count); |
2926 | debugfs_create_u32("clk_enable_count" , 0444, root, &core->enable_count); |
2927 | debugfs_create_u32("clk_protect_count" , 0444, root, &core->protect_count); |
2928 | debugfs_create_u32("clk_notifier_count" , 0444, root, &core->notifier_count); |
2929 | debugfs_create_file("clk_duty_cycle" , 0444, root, core, |
2930 | &clk_duty_cycle_fops); |
2931 | |
2932 | if (core->num_parents > 1) |
2933 | debugfs_create_file("clk_possible_parents" , 0444, root, core, |
2934 | &possible_parents_fops); |
2935 | |
2936 | if (core->ops->debug_init) |
2937 | core->ops->debug_init(core->hw, core->dentry); |
2938 | } |
2939 | |
2940 | /** |
2941 | * clk_debug_register - add a clk node to the debugfs clk directory |
2942 | * @core: the clk being added to the debugfs clk directory |
2943 | * |
2944 | * Dynamically adds a clk to the debugfs clk directory if debugfs has been |
2945 | * initialized. Otherwise it bails out early since the debugfs clk directory |
2946 | * will be created lazily by clk_debug_init as part of a late_initcall. |
2947 | */ |
2948 | static void clk_debug_register(struct clk_core *core) |
2949 | { |
2950 | mutex_lock(&clk_debug_lock); |
2951 | hlist_add_head(&core->debug_node, &clk_debug_list); |
2952 | if (inited) |
2953 | clk_debug_create_one(core, rootdir); |
2954 | mutex_unlock(&clk_debug_lock); |
2955 | } |
2956 | |
2957 | /** |
2958 | * clk_debug_unregister - remove a clk node from the debugfs clk directory |
2959 | * @core: the clk being removed from the debugfs clk directory |
2960 | * |
2961 | * Dynamically removes a clk and all its child nodes from the |
2962 | * debugfs clk directory if clk->dentry points to debugfs created by |
2963 | * clk_debug_register in __clk_core_init. |
2964 | */ |
2965 | static void clk_debug_unregister(struct clk_core *core) |
2966 | { |
2967 | mutex_lock(&clk_debug_lock); |
2968 | hlist_del_init(&core->debug_node); |
2969 | debugfs_remove_recursive(core->dentry); |
2970 | core->dentry = NULL; |
2971 | mutex_unlock(&clk_debug_lock); |
2972 | } |
2973 | |
2974 | /** |
2975 | * clk_debug_init - lazily populate the debugfs clk directory |
2976 | * |
* clks are often initialized very early during boot before memory can be
* dynamically allocated and well before debugfs is set up. This function
* populates the debugfs clk directory once at boot-time when we know that
* debugfs is set up. It should only be called once at boot-time; all other
* clks added dynamically will be registered with clk_debug_register.
2982 | */ |
2983 | static int __init clk_debug_init(void) |
2984 | { |
2985 | struct clk_core *core; |
2986 | |
2987 | rootdir = debugfs_create_dir("clk" , NULL); |
2988 | |
2989 | debugfs_create_file("clk_summary" , 0444, rootdir, &all_lists, |
2990 | &clk_summary_fops); |
2991 | debugfs_create_file("clk_dump" , 0444, rootdir, &all_lists, |
2992 | &clk_dump_fops); |
2993 | debugfs_create_file("clk_orphan_summary" , 0444, rootdir, &orphan_list, |
2994 | &clk_summary_fops); |
2995 | debugfs_create_file("clk_orphan_dump" , 0444, rootdir, &orphan_list, |
2996 | &clk_dump_fops); |
2997 | |
2998 | mutex_lock(&clk_debug_lock); |
2999 | hlist_for_each_entry(core, &clk_debug_list, debug_node) |
3000 | clk_debug_create_one(core, rootdir); |
3001 | |
3002 | inited = 1; |
3003 | mutex_unlock(&clk_debug_lock); |
3004 | |
3005 | return 0; |
3006 | } |
3007 | late_initcall(clk_debug_init); |
3008 | #else |
3009 | static inline void clk_debug_register(struct clk_core *core) { } |
3010 | static inline void clk_debug_reparent(struct clk_core *core, |
3011 | struct clk_core *new_parent) |
3012 | { |
3013 | } |
3014 | static inline void clk_debug_unregister(struct clk_core *core) |
3015 | { |
3016 | } |
3017 | #endif |
3018 | |
3019 | /** |
3020 | * __clk_core_init - initialize the data structures in a struct clk_core |
3021 | * @core: clk_core being initialized |
3022 | * |
3023 | * Initializes the lists in struct clk_core, queries the hardware for the |
3024 | * parent and rate and sets them both. |
3025 | */ |
3026 | static int __clk_core_init(struct clk_core *core) |
3027 | { |
3028 | int i, ret; |
3029 | struct clk_core *orphan; |
3030 | struct hlist_node *tmp2; |
3031 | unsigned long rate; |
3032 | |
3033 | if (!core) |
3034 | return -EINVAL; |
3035 | |
3036 | clk_prepare_lock(); |
3037 | |
3038 | ret = clk_pm_runtime_get(core); |
3039 | if (ret) |
3040 | goto unlock; |
3041 | |
3042 | /* check to see if a clock with this name is already registered */ |
3043 | if (clk_core_lookup(core->name)) { |
3044 | pr_debug("%s: clk %s already initialized\n" , |
3045 | __func__, core->name); |
3046 | ret = -EEXIST; |
3047 | goto out; |
3048 | } |
3049 | |
3050 | /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ |
3051 | if (core->ops->set_rate && |
3052 | !((core->ops->round_rate || core->ops->determine_rate) && |
3053 | core->ops->recalc_rate)) { |
3054 | pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n" , |
3055 | __func__, core->name); |
3056 | ret = -EINVAL; |
3057 | goto out; |
3058 | } |
3059 | |
3060 | if (core->ops->set_parent && !core->ops->get_parent) { |
3061 | pr_err("%s: %s must implement .get_parent & .set_parent\n" , |
3062 | __func__, core->name); |
3063 | ret = -EINVAL; |
3064 | goto out; |
3065 | } |
3066 | |
3067 | if (core->num_parents > 1 && !core->ops->get_parent) { |
3068 | pr_err("%s: %s must implement .get_parent as it has multi parents\n" , |
3069 | __func__, core->name); |
3070 | ret = -EINVAL; |
3071 | goto out; |
3072 | } |
3073 | |
3074 | if (core->ops->set_rate_and_parent && |
3075 | !(core->ops->set_parent && core->ops->set_rate)) { |
3076 | pr_err("%s: %s must implement .set_parent & .set_rate\n" , |
3077 | __func__, core->name); |
3078 | ret = -EINVAL; |
3079 | goto out; |
3080 | } |
3081 | |
3082 | /* throw a WARN if any entries in parent_names are NULL */ |
3083 | for (i = 0; i < core->num_parents; i++) |
3084 | WARN(!core->parent_names[i], |
3085 | "%s: invalid NULL in %s's .parent_names\n" , |
3086 | __func__, core->name); |
3087 | |
3088 | core->parent = __clk_init_parent(core); |
3089 | |
3090 | /* |
3091 | * Populate core->parent if parent has already been clk_core_init'd. If |
3092 | * parent has not yet been clk_core_init'd then place clk in the orphan |
3093 | * list. If clk doesn't have any parents then place it in the root |
3094 | * clk list. |
3095 | * |
3096 | * Every time a new clk is clk_init'd then we walk the list of orphan |
3097 | * clocks and re-parent any that are children of the clock currently |
3098 | * being clk_init'd. |
3099 | */ |
3100 | if (core->parent) { |
3101 | hlist_add_head(&core->child_node, |
3102 | &core->parent->children); |
3103 | core->orphan = core->parent->orphan; |
3104 | } else if (!core->num_parents) { |
3105 | hlist_add_head(&core->child_node, &clk_root_list); |
3106 | core->orphan = false; |
3107 | } else { |
3108 | hlist_add_head(&core->child_node, &clk_orphan_list); |
3109 | core->orphan = true; |
3110 | } |
3111 | |
3112 | /* |
3113 | * optional platform-specific magic |
3114 | * |
3115 | * The .init callback is not used by any of the basic clock types, but |
3116 | * exists for weird hardware that must perform initialization magic. |
3117 | * Please consider other ways of solving initialization problems before |
3118 | * using this callback, as its use is discouraged. |
3119 | */ |
3120 | if (core->ops->init) |
3121 | core->ops->init(core->hw); |
3122 | |
3123 | /* |
3124 | * Set clk's accuracy. The preferred method is to use |
3125 | * .recalc_accuracy. For simple clocks and lazy developers the default |
3126 | * fallback is to use the parent's accuracy. If a clock doesn't have a |
3127 | * parent (or is orphaned) then accuracy is set to zero (perfect |
3128 | * clock). |
3129 | */ |
3130 | if (core->ops->recalc_accuracy) |
3131 | core->accuracy = core->ops->recalc_accuracy(core->hw, |
3132 | __clk_get_accuracy(core->parent)); |
3133 | else if (core->parent) |
3134 | core->accuracy = core->parent->accuracy; |
3135 | else |
3136 | core->accuracy = 0; |
3137 | |
3138 | /* |
3139 | * Set clk's phase. |
3140 | * Since a phase is by definition relative to its parent, just |
3141 | * query the current clock phase, or just assume it's in phase. |
3142 | */ |
3143 | if (core->ops->get_phase) |
3144 | core->phase = core->ops->get_phase(core->hw); |
3145 | else |
3146 | core->phase = 0; |
3147 | |
3148 | /* |
3149 | * Set clk's duty cycle. |
3150 | */ |
3151 | clk_core_update_duty_cycle_nolock(core); |
3152 | |
3153 | /* |
3154 | * Set clk's rate. The preferred method is to use .recalc_rate. For |
3155 | * simple clocks and lazy developers the default fallback is to use the |
3156 | * parent's rate. If a clock doesn't have a parent (or is orphaned) |
3157 | * then rate is set to zero. |
3158 | */ |
3159 | if (core->ops->recalc_rate) |
3160 | rate = core->ops->recalc_rate(core->hw, |
3161 | clk_core_get_rate_nolock(core->parent)); |
3162 | else if (core->parent) |
3163 | rate = core->parent->rate; |
3164 | else |
3165 | rate = 0; |
3166 | core->rate = core->req_rate = rate; |
3167 | |
3168 | /* |
3169 | * Enable CLK_IS_CRITICAL clocks so newly added critical clocks |
3170 | * don't get accidentally disabled when walking the orphan tree and |
3171 | * reparenting clocks |
3172 | */ |
3173 | if (core->flags & CLK_IS_CRITICAL) { |
3174 | unsigned long flags; |
3175 | |
3176 | clk_core_prepare(core); |
3177 | |
3178 | flags = clk_enable_lock(); |
3179 | clk_core_enable(core); |
3180 | clk_enable_unlock(flags); |
3181 | } |
3182 | |
3183 | /* |
* walk the list of orphan clocks and reparent any that have newly found
* a parent.
3186 | */ |
3187 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
3188 | struct clk_core *parent = __clk_init_parent(orphan); |
3189 | |
3190 | /* |
* We need to use __clk_set_parent_before() and _after() to
* properly migrate any prepare/enable count of the orphan
3193 | * clock. This is important for CLK_IS_CRITICAL clocks, which |
3194 | * are enabled during init but might not have a parent yet. |
3195 | */ |
3196 | if (parent) { |
3197 | /* update the clk tree topology */ |
3198 | __clk_set_parent_before(orphan, parent); |
3199 | __clk_set_parent_after(orphan, parent, NULL); |
3200 | __clk_recalc_accuracies(orphan); |
3201 | __clk_recalc_rates(orphan, 0); |
3202 | } |
3203 | } |
3204 | |
3205 | kref_init(&core->ref); |
3206 | out: |
3207 | clk_pm_runtime_put(core); |
3208 | unlock: |
3209 | clk_prepare_unlock(); |
3210 | |
3211 | if (!ret) |
3212 | clk_debug_register(core); |
3213 | |
3214 | return ret; |
3215 | } |
3216 | |
3217 | /** |
3218 | * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core |
3219 | * @core: clk to add consumer to |
3220 | * @clk: consumer to link to a clk |
3221 | */ |
3222 | static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) |
3223 | { |
3224 | clk_prepare_lock(); |
3225 | hlist_add_head(&clk->clks_node, &core->clks); |
3226 | clk_prepare_unlock(); |
3227 | } |
3228 | |
3229 | /** |
3230 | * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core |
3231 | * @clk: consumer to unlink |
3232 | */ |
3233 | static void clk_core_unlink_consumer(struct clk *clk) |
3234 | { |
3235 | lockdep_assert_held(&prepare_lock); |
3236 | hlist_del(&clk->clks_node); |
3237 | } |
3238 | |
3239 | /** |
3240 | * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core |
3241 | * @core: clk to allocate a consumer for |
3242 | * @dev_id: string describing device name |
3243 | * @con_id: connection ID string on device |
3244 | * |
3245 | * Returns: clk consumer left unlinked from the consumer list |
3246 | */ |
3247 | static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, |
3248 | const char *con_id) |
3249 | { |
3250 | struct clk *clk; |
3251 | |
3252 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); |
3253 | if (!clk) |
3254 | return ERR_PTR(-ENOMEM); |
3255 | |
3256 | clk->core = core; |
3257 | clk->dev_id = dev_id; |
3258 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); |
3259 | clk->max_rate = ULONG_MAX; |
3260 | |
3261 | return clk; |
3262 | } |
3263 | |
3264 | /** |
3265 | * free_clk - Free a clk consumer |
3266 | * @clk: clk consumer to free |
3267 | * |
3268 | * Note, this assumes the clk has been unlinked from the clk_core consumer |
3269 | * list. |
3270 | */ |
3271 | static void free_clk(struct clk *clk) |
3272 | { |
3273 | kfree_const(clk->con_id); |
3274 | kfree(clk); |
3275 | } |
3276 | |
3277 | /** |
3278 | * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given |
3279 | * a clk_hw |
3280 | * @dev: clk consumer device |
3281 | * @hw: clk_hw associated with the clk being consumed |
3282 | * @dev_id: string describing device name |
3283 | * @con_id: connection ID string on device |
3284 | * |
3285 | * This is the main function used to create a clk pointer for use by clk |
3286 | * consumers. It connects a consumer to the clk_core and clk_hw structures |
3287 | * used by the framework and clk provider respectively. |
3288 | */ |
3289 | struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, |
3290 | const char *dev_id, const char *con_id) |
3291 | { |
3292 | struct clk *clk; |
3293 | struct clk_core *core; |
3294 | |
3295 | /* This is to allow this function to be chained to others */ |
3296 | if (IS_ERR_OR_NULL(hw)) |
3297 | return ERR_CAST(hw); |
3298 | |
3299 | core = hw->core; |
3300 | clk = alloc_clk(core, dev_id, con_id); |
3301 | if (IS_ERR(clk)) |
3302 | return clk; |
3303 | clk->dev = dev; |
3304 | |
3305 | if (!try_module_get(core->owner)) { |
3306 | free_clk(clk); |
3307 | return ERR_PTR(-ENOENT); |
3308 | } |
3309 | |
3310 | kref_get(&core->ref); |
3311 | clk_core_link_consumer(core, clk); |
3312 | |
3313 | return clk; |
3314 | } |
3315 | |
3316 | /** |
3317 | * clk_register - allocate a new clock, register it and return an opaque cookie |
3318 | * @dev: device that is registering this clock |
3319 | * @hw: link to hardware-specific clock data |
3320 | * |
3321 | * clk_register is the primary interface for populating the clock tree with new |
3322 | * clock nodes. It returns a pointer to the newly allocated struct clk which |
3323 | * cannot be dereferenced by driver code but may be used in conjunction with the |
3324 | * rest of the clock API. In the event of an error clk_register will return an |
3325 | * error code; drivers must test for an error code after calling clk_register. |
3326 | */ |
3327 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) |
3328 | { |
3329 | int i, ret; |
3330 | struct clk_core *core; |
3331 | |
3332 | core = kzalloc(sizeof(*core), GFP_KERNEL); |
3333 | if (!core) { |
3334 | ret = -ENOMEM; |
3335 | goto fail_out; |
3336 | } |
3337 | |
3338 | core->name = kstrdup_const(hw->init->name, GFP_KERNEL); |
3339 | if (!core->name) { |
3340 | ret = -ENOMEM; |
3341 | goto fail_name; |
3342 | } |
3343 | |
3344 | if (WARN_ON(!hw->init->ops)) { |
3345 | ret = -EINVAL; |
3346 | goto fail_ops; |
3347 | } |
3348 | core->ops = hw->init->ops; |
3349 | |
3350 | if (dev && pm_runtime_enabled(dev)) |
3351 | core->rpm_enabled = true; |
3352 | core->dev = dev; |
3353 | if (dev && dev->driver) |
3354 | core->owner = dev->driver->owner; |
3355 | core->hw = hw; |
3356 | core->flags = hw->init->flags; |
3357 | core->num_parents = hw->init->num_parents; |
3358 | core->min_rate = 0; |
3359 | core->max_rate = ULONG_MAX; |
3360 | hw->core = core; |
3361 | |
3362 | /* allocate local copy in case parent_names is __initdata */ |
3363 | core->parent_names = kcalloc(core->num_parents, sizeof(char *), |
3364 | GFP_KERNEL); |
3365 | |
3366 | if (!core->parent_names) { |
3367 | ret = -ENOMEM; |
3368 | goto fail_parent_names; |
3369 | } |
3370 | |
3371 | |
3372 | /* copy each string name in case parent_names is __initdata */ |
3373 | for (i = 0; i < core->num_parents; i++) { |
3374 | core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], |
3375 | GFP_KERNEL); |
3376 | if (!core->parent_names[i]) { |
3377 | ret = -ENOMEM; |
3378 | goto fail_parent_names_copy; |
3379 | } |
3380 | } |
3381 | |
3382 | /* avoid unnecessary string look-ups of clk_core's possible parents. */ |
3383 | core->parents = kcalloc(core->num_parents, sizeof(*core->parents), |
3384 | GFP_KERNEL); |
3385 | if (!core->parents) { |
3386 | ret = -ENOMEM; |
3387 | goto fail_parents; |
3388 | }; |
3389 | |
3390 | INIT_HLIST_HEAD(&core->clks); |
3391 | |
3392 | /* |
3393 | * Don't call clk_hw_create_clk() here because that would pin the |
3394 | * provider module to itself and prevent it from ever being removed. |
3395 | */ |
3396 | hw->clk = alloc_clk(core, NULL, NULL); |
3397 | if (IS_ERR(hw->clk)) { |
3398 | ret = PTR_ERR(hw->clk); |
3399 | goto fail_parents; |
3400 | } |
3401 | |
3402 | clk_core_link_consumer(hw->core, hw->clk); |
3403 | |
3404 | ret = __clk_core_init(core); |
3405 | if (!ret) |
3406 | return hw->clk; |
3407 | |
3408 | clk_prepare_lock(); |
3409 | clk_core_unlink_consumer(hw->clk); |
3410 | clk_prepare_unlock(); |
3411 | |
3412 | free_clk(hw->clk); |
3413 | hw->clk = NULL; |
3414 | |
3415 | fail_parents: |
3416 | kfree(core->parents); |
3417 | fail_parent_names_copy: |
3418 | while (--i >= 0) |
3419 | kfree_const(core->parent_names[i]); |
3420 | kfree(core->parent_names); |
3421 | fail_parent_names: |
3422 | fail_ops: |
3423 | kfree_const(core->name); |
3424 | fail_name: |
3425 | kfree(core); |
3426 | fail_out: |
3427 | return ERR_PTR(ret); |
3428 | } |
3429 | EXPORT_SYMBOL_GPL(clk_register); |
3430 | |
3431 | /** |
3432 | * clk_hw_register - register a clk_hw and return an error code |
3433 | * @dev: device that is registering this clock |
3434 | * @hw: link to hardware-specific clock data |
3435 | * |
3436 | * clk_hw_register is the primary interface for populating the clock tree with |
3437 | * new clock nodes. It returns an integer equal to zero indicating success or |
3438 | * less than zero indicating failure. Drivers must test for an error code after |
3439 | * calling clk_hw_register(). |
3440 | */ |
3441 | int clk_hw_register(struct device *dev, struct clk_hw *hw) |
3442 | { |
3443 | return PTR_ERR_OR_ZERO(clk_register(dev, hw)); |
3444 | } |
3445 | EXPORT_SYMBOL_GPL(clk_hw_register); |
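
/*
 * Example (provider-side sketch; the "foo" names and my_gate_ops are
 * hypothetical):
 *
 *	static const char * const foo_parents[] = { "foo_pll" };
 *
 *	static struct clk_hw foo_hw = {
 *		.init = &(struct clk_init_data){
 *			.name = "foo_gate",
 *			.ops = &my_gate_ops,
 *			.parent_names = foo_parents,
 *			.num_parents = ARRAY_SIZE(foo_parents),
 *			.flags = CLK_SET_RATE_PARENT,
 *		},
 *	};
 *
 *	ret = clk_hw_register(dev, &foo_hw);
 */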
3446 | |
3447 | /* Free memory allocated for a clock. */ |
3448 | static void __clk_release(struct kref *ref) |
3449 | { |
3450 | struct clk_core *core = container_of(ref, struct clk_core, ref); |
3451 | int i = core->num_parents; |
3452 | |
3453 | lockdep_assert_held(&prepare_lock); |
3454 | |
3455 | kfree(core->parents); |
3456 | while (--i >= 0) |
3457 | kfree_const(core->parent_names[i]); |
3458 | |
3459 | kfree(core->parent_names); |
3460 | kfree_const(core->name); |
3461 | kfree(core); |
3462 | } |
3463 | |
3464 | /* |
3465 | * Empty clk_ops for unregistered clocks. These are used temporarily |
* after clk_unregister() was called on a clock and until the last clock
* consumer calls clk_put() and the struct clk object is freed.
3468 | */ |
3469 | static int clk_nodrv_prepare_enable(struct clk_hw *hw) |
3470 | { |
3471 | return -ENXIO; |
3472 | } |
3473 | |
3474 | static void clk_nodrv_disable_unprepare(struct clk_hw *hw) |
3475 | { |
3476 | WARN_ON_ONCE(1); |
3477 | } |
3478 | |
3479 | static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, |
3480 | unsigned long parent_rate) |
3481 | { |
3482 | return -ENXIO; |
3483 | } |
3484 | |
3485 | static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) |
3486 | { |
3487 | return -ENXIO; |
3488 | } |
3489 | |
3490 | static const struct clk_ops clk_nodrv_ops = { |
3491 | .enable = clk_nodrv_prepare_enable, |
3492 | .disable = clk_nodrv_disable_unprepare, |
3493 | .prepare = clk_nodrv_prepare_enable, |
3494 | .unprepare = clk_nodrv_disable_unprepare, |
3495 | .set_rate = clk_nodrv_set_rate, |
3496 | .set_parent = clk_nodrv_set_parent, |
3497 | }; |
3498 | |
3499 | /** |
3500 | * clk_unregister - unregister a currently registered clock |
3501 | * @clk: clock to unregister |
3502 | */ |
3503 | void clk_unregister(struct clk *clk) |
3504 | { |
3505 | unsigned long flags; |
3506 | |
3507 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
3508 | return; |
3509 | |
3510 | clk_debug_unregister(clk->core); |
3511 | |
3512 | clk_prepare_lock(); |
3513 | |
3514 | if (clk->core->ops == &clk_nodrv_ops) { |
3515 | pr_err("%s: unregistered clock: %s\n" , __func__, |
3516 | clk->core->name); |
3517 | goto unlock; |
3518 | } |
3519 | /* |
3520 | * Assign empty clock ops for consumers that might still hold |
3521 | * a reference to this clock. |
3522 | */ |
3523 | flags = clk_enable_lock(); |
3524 | clk->core->ops = &clk_nodrv_ops; |
3525 | clk_enable_unlock(flags); |
3526 | |
3527 | if (!hlist_empty(&clk->core->children)) { |
3528 | struct clk_core *child; |
3529 | struct hlist_node *t; |
3530 | |
3531 | /* Reparent all children to the orphan list. */ |
3532 | hlist_for_each_entry_safe(child, t, &clk->core->children, |
3533 | child_node) |
3534 | clk_core_set_parent_nolock(child, NULL); |
3535 | } |
3536 | |
3537 | hlist_del_init(&clk->core->child_node); |
3538 | |
3539 | if (clk->core->prepare_count) |
3540 | pr_warn("%s: unregistering prepared clock: %s\n" , |
3541 | __func__, clk->core->name); |
3542 | |
3543 | if (clk->core->protect_count) |
3544 | pr_warn("%s: unregistering protected clock: %s\n" , |
3545 | __func__, clk->core->name); |
3546 | |
3547 | kref_put(&clk->core->ref, __clk_release); |
3548 | unlock: |
3549 | clk_prepare_unlock(); |
3550 | } |
3551 | EXPORT_SYMBOL_GPL(clk_unregister); |
3552 | |
3553 | /** |
3554 | * clk_hw_unregister - unregister a currently registered clk_hw |
3555 | * @hw: hardware-specific clock data to unregister |
3556 | */ |
3557 | void clk_hw_unregister(struct clk_hw *hw) |
3558 | { |
3559 | clk_unregister(hw->clk); |
3560 | } |
3561 | EXPORT_SYMBOL_GPL(clk_hw_unregister); |
3562 | |
3563 | static void devm_clk_release(struct device *dev, void *res) |
3564 | { |
3565 | clk_unregister(*(struct clk **)res); |
3566 | } |
3567 | |
3568 | static void devm_clk_hw_release(struct device *dev, void *res) |
3569 | { |
3570 | clk_hw_unregister(*(struct clk_hw **)res); |
3571 | } |
3572 | |
3573 | /** |
3574 | * devm_clk_register - resource managed clk_register() |
3575 | * @dev: device that is registering this clock |
3576 | * @hw: link to hardware-specific clock data |
3577 | * |
3578 | * Managed clk_register(). Clocks returned from this function are |
3579 | * automatically clk_unregister()ed on driver detach. See clk_register() for |
3580 | * more information. |
3581 | */ |
3582 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) |
3583 | { |
3584 | struct clk *clk; |
3585 | struct clk **clkp; |
3586 | |
3587 | clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); |
3588 | if (!clkp) |
3589 | return ERR_PTR(-ENOMEM); |
3590 | |
3591 | clk = clk_register(dev, hw); |
3592 | if (!IS_ERR(clk)) { |
3593 | *clkp = clk; |
3594 | devres_add(dev, clkp); |
3595 | } else { |
3596 | devres_free(clkp); |
3597 | } |
3598 | |
3599 | return clk; |
3600 | } |
3601 | EXPORT_SYMBOL_GPL(devm_clk_register); |
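/*
 * Illustrative sketch only: typical use of devm_clk_register() from a driver
 * probe routine, so that no explicit clk_unregister() is needed on driver
 * detach. foo_hw and foo_probe are hypothetical placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &foo_hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return 0;
 *	}
 */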
3602 | |
3603 | /** |
3604 | * devm_clk_hw_register - resource managed clk_hw_register() |
3605 | * @dev: device that is registering this clock |
3606 | * @hw: link to hardware-specific clock data |
3607 | * |
3608 | * Managed clk_hw_register(). Clocks registered by this function are |
3609 | * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() |
3610 | * for more information. |
3611 | */ |
3612 | int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) |
3613 | { |
3614 | struct clk_hw **hwp; |
3615 | int ret; |
3616 | |
3617 | hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); |
3618 | if (!hwp) |
3619 | return -ENOMEM; |
3620 | |
3621 | ret = clk_hw_register(dev, hw); |
3622 | if (!ret) { |
3623 | *hwp = hw; |
3624 | devres_add(dev, hwp); |
3625 | } else { |
3626 | devres_free(hwp); |
3627 | } |
3628 | |
3629 | return ret; |
3630 | } |
3631 | EXPORT_SYMBOL_GPL(devm_clk_hw_register); |
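/*
 * Illustrative sketch only: the clk_hw-based managed registration, which is
 * the preferred form for providers. foo_hw and foo_probe are hypothetical;
 * the follow-up OF provider registration is one common pattern, not a
 * requirement.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_clk_hw_register(&pdev->dev, &foo_hw);
 *		if (ret)
 *			return ret;
 *
 *		return devm_of_clk_add_hw_provider(&pdev->dev,
 *						   of_clk_hw_simple_get,
 *						   &foo_hw);
 *	}
 */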
3632 | |
3633 | static int devm_clk_match(struct device *dev, void *res, void *data) |
3634 | { |
	struct clk *c = res;

	if (WARN_ON(!c))
3637 | return 0; |
3638 | return c == data; |
3639 | } |
3640 | |
3641 | static int devm_clk_hw_match(struct device *dev, void *res, void *data) |
3642 | { |
3643 | struct clk_hw *hw = res; |
3644 | |
3645 | if (WARN_ON(!hw)) |
3646 | return 0; |
3647 | return hw == data; |
3648 | } |
3649 | |
3650 | /** |
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
3653 | * |
3654 | * Deallocate a clock allocated with devm_clk_register(). Normally |
3655 | * this function will not need to be called and the resource management |
3656 | * code will ensure that the resource is freed. |
3657 | */ |
3658 | void devm_clk_unregister(struct device *dev, struct clk *clk) |
3659 | { |
3660 | WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); |
3661 | } |
3662 | EXPORT_SYMBOL_GPL(devm_clk_unregister); |
3663 | |
3664 | /** |
3665 | * devm_clk_hw_unregister - resource managed clk_hw_unregister() |
3666 | * @dev: device that is unregistering the hardware-specific clock data |
3667 | * @hw: link to hardware-specific clock data |
3668 | * |
3669 | * Unregister a clk_hw registered with devm_clk_hw_register(). Normally |
3670 | * this function will not need to be called and the resource management |
3671 | * code will ensure that the resource is freed. |
3672 | */ |
3673 | void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) |
3674 | { |
3675 | WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, |
3676 | hw)); |
3677 | } |
3678 | EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); |
3679 | |
3680 | /* |
3681 | * clkdev helpers |
3682 | */ |
3683 | |
3684 | void __clk_put(struct clk *clk) |
3685 | { |
3686 | struct module *owner; |
3687 | |
3688 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
3689 | return; |
3690 | |
3691 | clk_prepare_lock(); |
3692 | |
3693 | /* |
	 * Before calling clk_put(), all calls to clk_rate_exclusive_get() from
	 * a given consumer must be balanced with calls to
	 * clk_rate_exclusive_put() by that same consumer.
3697 | */ |
3698 | if (WARN_ON(clk->exclusive_count)) { |
3699 | /* We voiced our concern, let's sanitize the situation */ |
3700 | clk->core->protect_count -= (clk->exclusive_count - 1); |
3701 | clk_core_rate_unprotect(clk->core); |
3702 | clk->exclusive_count = 0; |
3703 | } |
3704 | |
3705 | hlist_del(&clk->clks_node); |
3706 | if (clk->min_rate > clk->core->req_rate || |
3707 | clk->max_rate < clk->core->req_rate) |
3708 | clk_core_set_rate_nolock(clk->core, clk->core->req_rate); |
3709 | |
3710 | owner = clk->core->owner; |
3711 | kref_put(&clk->core->ref, __clk_release); |
3712 | |
3713 | clk_prepare_unlock(); |
3714 | |
3715 | module_put(owner); |
3716 | |
3717 | free_clk(clk); |
3718 | } |
3719 | |
3720 | /*** clk rate change notifiers ***/ |
3721 | |
3722 | /** |
3723 | * clk_notifier_register - add a clk rate change notifier |
3724 | * @clk: struct clk * to watch |
3725 | * @nb: struct notifier_block * with callback info |
3726 | * |
3727 | * Request notification when clk's rate changes. This uses an SRCU |
3728 | * notifier because we want it to block and notifier unregistrations are |
3729 | * uncommon. The callbacks associated with the notifier must not |
3730 | * re-enter into the clk framework by calling any top-level clk APIs; |
 * doing so would result in nested locking of the prepare_lock mutex.
3732 | * |
3733 | * In all notification cases (pre, post and abort rate change) the original |
3734 | * clock rate is passed to the callback via struct clk_notifier_data.old_rate |
3735 | * and the new frequency is passed via struct clk_notifier_data.new_rate. |
3736 | * |
3737 | * clk_notifier_register() must be called from non-atomic context. |
3738 | * Returns -EINVAL if called with null arguments, -ENOMEM upon |
3739 | * allocation failure; otherwise, passes along the return value of |
3740 | * srcu_notifier_chain_register(). |
3741 | */ |
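/*
 * Illustrative sketch only: a rate change notifier callback of the form
 * described in the kernel-doc above, registered from non-atomic context such
 * as probe. foo_notifier_cb, foo_nb, foo_clk and FOO_MAX_RATE are
 * hypothetical placeholders; returning NOTIFY_BAD from PRE_RATE_CHANGE vetoes
 * the proposed rate change.
 *
 *	static int foo_notifier_cb(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE &&
 *		    ndata->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(foo_clk, &foo_nb);
 */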
3742 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb) |
3743 | { |
3744 | struct clk_notifier *cn; |
3745 | int ret = -ENOMEM; |
3746 | |
3747 | if (!clk || !nb) |
3748 | return -EINVAL; |
3749 | |
3750 | clk_prepare_lock(); |
3751 | |
3752 | /* search the list of notifiers for this clk */ |
3753 | list_for_each_entry(cn, &clk_notifier_list, node) |
3754 | if (cn->clk == clk) |
3755 | break; |
3756 | |
3757 | /* if clk wasn't in the notifier list, allocate new clk_notifier */ |
3758 | if (cn->clk != clk) { |
3759 | cn = kzalloc(sizeof(*cn), GFP_KERNEL); |
3760 | if (!cn) |
3761 | goto out; |
3762 | |
3763 | cn->clk = clk; |
3764 | srcu_init_notifier_head(&cn->n |