1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * linux/include/linux/clk.h |
4 | * |
5 | * Copyright (C) 2004 ARM Limited. |
6 | * Written by Deep Blue Solutions Limited. |
7 | * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> |
8 | */ |
9 | #ifndef __LINUX_CLK_H |
10 | #define __LINUX_CLK_H |
11 | |
12 | #include <linux/err.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/notifier.h> |
15 | |
16 | struct device; |
17 | struct clk; |
18 | struct device_node; |
19 | struct of_phandle_args; |
20 | |
21 | /** |
22 | * DOC: clk notifier callback types |
23 | * |
24 | * PRE_RATE_CHANGE - called immediately before the clk rate is changed, |
25 | * to indicate that the rate change will proceed. Drivers must |
26 | * immediately terminate any operations that will be affected by the |
27 | * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, |
28 | * NOTIFY_STOP or NOTIFY_BAD. |
29 | * |
30 | * ABORT_RATE_CHANGE: called if the rate change failed for some reason |
31 | * after PRE_RATE_CHANGE. In this case, all registered notifiers on |
32 | * the clk will be called with ABORT_RATE_CHANGE. Callbacks must |
33 | * always return NOTIFY_DONE or NOTIFY_OK. |
34 | * |
35 | * POST_RATE_CHANGE - called after the clk rate change has successfully |
36 | * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. |
37 | * |
38 | */ |
/* Event bitmasks passed to clk notifier callbacks (see DOC block above). */
#define PRE_RATE_CHANGE BIT(0)
#define POST_RATE_CHANGE BIT(1)
#define ABORT_RATE_CHANGE BIT(2)
42 | |
43 | /** |
44 | * struct clk_notifier - associate a clk with a notifier |
45 | * @clk: struct clk * to associate the notifier with |
 * @notifier_head: an srcu_notifier_head for this clk
47 | * @node: linked list pointers |
48 | * |
49 | * A list of struct clk_notifier is maintained by the notifier code. |
50 | * An entry is created whenever code registers the first notifier on a |
51 | * particular @clk. Future notifiers on that @clk are added to the |
52 | * @notifier_head. |
53 | */ |
struct clk_notifier {
	struct clk			*clk;		/* clk this notifier entry is for */
	struct srcu_notifier_head	notifier_head;	/* chain of registered callbacks */
	struct list_head		node;		/* entry in the list kept by the notifier code */
};
59 | |
60 | /** |
61 | * struct clk_notifier_data - rate data to pass to the notifier callback |
62 | * @clk: struct clk * being changed |
63 | * @old_rate: previous rate of this clk |
64 | * @new_rate: new rate of this clk |
65 | * |
66 | * For a pre-notifier, old_rate is the clk's rate before this rate |
67 | * change, and new_rate is what the rate will be in the future. For a |
68 | * post-notifier, old_rate and new_rate are both set to the clk's |
69 | * current rate (this was done to optimize the implementation). |
70 | */ |
struct clk_notifier_data {
	struct clk	*clk;		/* clk whose rate is being changed */
	unsigned long	old_rate;	/* rate before the change (pre-notifier) */
	unsigned long	new_rate;	/* rate after the change; equals old_rate in post-notifiers */
};
76 | |
77 | /** |
78 | * struct clk_bulk_data - Data used for bulk clk operations. |
79 | * |
80 | * @id: clock consumer ID |
81 | * @clk: struct clk * to store the associated clock |
82 | * |
83 | * The CLK APIs provide a series of clk_bulk_() API calls as |
84 | * a convenience to consumers which require multiple clks. This |
85 | * structure is used to manage data for these calls. |
86 | */ |
struct clk_bulk_data {
	const char	*id;	/* clock consumer ID used for the lookup */
	struct clk	*clk;	/* clk handle stored here by the clk_bulk_*() calls */
};
91 | |
92 | #ifdef CONFIG_COMMON_CLK |
93 | |
94 | /** |
95 | * clk_notifier_register - register a clock rate-change notifier callback |
96 | * @clk: clock whose rate we are interested in |
97 | * @nb: notifier block with callback function pointer |
98 | * |
99 | * ProTip: debugging across notifier chains can be frustrating. Make sure that |
100 | * your notifier callback function prints a nice big warning in case of |
101 | * failure. |
102 | */ |
103 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb); |
104 | |
105 | /** |
106 | * clk_notifier_unregister - unregister a clock rate-change notifier callback |
107 | * @clk: clock whose rate we are no longer interested in |
108 | * @nb: notifier block which will be unregistered |
109 | */ |
110 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); |
111 | |
112 | /** |
113 | * devm_clk_notifier_register - register a managed rate-change notifier callback |
114 | * @dev: device for clock "consumer" |
115 | * @clk: clock whose rate we are interested in |
116 | * @nb: notifier block with callback function pointer |
117 | * |
118 | * Returns 0 on success, -EERROR otherwise |
119 | */ |
120 | int devm_clk_notifier_register(struct device *dev, struct clk *clk, |
121 | struct notifier_block *nb); |
122 | |
123 | /** |
124 | * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) |
125 | * for a clock source. |
126 | * @clk: clock source |
127 | * |
128 | * This gets the clock source accuracy expressed in ppb. |
129 | * A perfect clock returns 0. |
130 | */ |
131 | long clk_get_accuracy(struct clk *clk); |
132 | |
133 | /** |
134 | * clk_set_phase - adjust the phase shift of a clock signal |
135 | * @clk: clock signal source |
136 | * @degrees: number of degrees the signal is shifted |
137 | * |
138 | * Shifts the phase of a clock signal by the specified degrees. Returns 0 on |
139 | * success, -EERROR otherwise. |
140 | */ |
141 | int clk_set_phase(struct clk *clk, int degrees); |
142 | |
143 | /** |
144 | * clk_get_phase - return the phase shift of a clock signal |
145 | * @clk: clock signal source |
146 | * |
147 | * Returns the phase shift of a clock node in degrees, otherwise returns |
148 | * -EERROR. |
149 | */ |
150 | int clk_get_phase(struct clk *clk); |
151 | |
152 | /** |
153 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal |
154 | * @clk: clock signal source |
155 | * @num: numerator of the duty cycle ratio to be applied |
156 | * @den: denominator of the duty cycle ratio to be applied |
157 | * |
158 | * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on |
159 | * success, -EERROR otherwise. |
160 | */ |
161 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); |
162 | |
163 | /** |
164 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal |
165 | * @clk: clock signal source |
166 | * @scale: scaling factor to be applied to represent the ratio as an integer |
167 | * |
168 | * Returns the duty cycle ratio multiplied by the scale provided, otherwise |
169 | * returns -EERROR. |
170 | */ |
171 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); |
172 | |
173 | /** |
174 | * clk_is_match - check if two clk's point to the same hardware clock |
175 | * @p: clk compared against q |
176 | * @q: clk compared against p |
177 | * |
178 | * Returns true if the two struct clk pointers both point to the same hardware |
179 | * clock node. Put differently, returns true if @p and @q |
180 | * share the same &struct clk_core object. |
181 | * |
182 | * Returns false otherwise. Note that two NULL clks are treated as matching. |
183 | */ |
184 | bool clk_is_match(const struct clk *p, const struct clk *q); |
185 | |
186 | /** |
187 | * clk_rate_exclusive_get - get exclusivity over the rate control of a |
188 | * producer |
189 | * @clk: clock source |
190 | * |
191 | * This function allows drivers to get exclusive control over the rate of a |
 * provider. It prevents any other consumer from executing, even indirectly,
 * operations which could alter the rate of the provider or cause glitches.
194 | * |
 * If exclusivity is claimed more than once on a clock, even by the same driver,
196 | * the rate effectively gets locked as exclusivity can't be preempted. |
197 | * |
198 | * Must not be called from within atomic context. |
199 | * |
200 | * Returns success (0) or negative errno. |
201 | */ |
202 | int clk_rate_exclusive_get(struct clk *clk); |
203 | |
204 | /** |
205 | * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get |
206 | * @dev: device the exclusivity is bound to |
207 | * @clk: clock source |
208 | * |
209 | * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler |
210 | * on @dev to call clk_rate_exclusive_put(). |
211 | * |
212 | * Must not be called from within atomic context. |
213 | */ |
214 | int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk); |
215 | |
216 | /** |
217 | * clk_rate_exclusive_put - release exclusivity over the rate control of a |
218 | * producer |
219 | * @clk: clock source |
220 | * |
221 | * This function allows drivers to release the exclusivity it previously got |
222 | * from clk_rate_exclusive_get() |
223 | * |
224 | * The caller must balance the number of clk_rate_exclusive_get() and |
225 | * clk_rate_exclusive_put() calls. |
226 | * |
227 | * Must not be called from within atomic context. |
228 | */ |
229 | void clk_rate_exclusive_put(struct clk *clk); |
230 | |
231 | #else |
232 | |
static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	/* !CONFIG_COMMON_CLK: rate-change notifiers are not supported */
	return -ENOTSUPP;
}
238 | |
static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	/* !CONFIG_COMMON_CLK: rate-change notifiers are not supported */
	return -ENOTSUPP;
}
244 | |
static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	/* !CONFIG_COMMON_CLK: rate-change notifiers are not supported */
	return -ENOTSUPP;
}
251 | |
static inline long clk_get_accuracy(struct clk *clk)
{
	/* !CONFIG_COMMON_CLK: no accuracy information available */
	return -ENOTSUPP;
}
256 | |
257 | static inline long clk_set_phase(struct clk *clk, int phase) |
258 | { |
259 | return -ENOTSUPP; |
260 | } |
261 | |
262 | static inline long clk_get_phase(struct clk *clk) |
263 | { |
264 | return -ENOTSUPP; |
265 | } |
266 | |
static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	/* !CONFIG_COMMON_CLK: duty cycle control is not supported */
	return -ENOTSUPP;
}
272 | |
/*
 * !CONFIG_COMMON_CLK stub.
 *
 * Return type changed from 'unsigned int' to 'int' so the stub matches the
 * real clk_get_scaled_duty_cycle() declaration above, which is documented
 * to return a negative errno on failure. The stub itself still reports a
 * duty cycle of 0.
 */
static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}
278 | |
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/*
	 * Without the common clk framework there is no clk_core indirection,
	 * so pointer equality is the match criterion. Two NULL clks match.
	 */
	return p == q;
}
283 | |
static inline int clk_rate_exclusive_get(struct clk *clk)
{
	/* !CONFIG_COMMON_CLK: nothing to lock; report success */
	return 0;
}
288 | |
static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
{
	/* !CONFIG_COMMON_CLK: nothing to lock; report success */
	return 0;
}
293 | |
/* !CONFIG_COMMON_CLK: no exclusivity was taken, so nothing to release */
static inline void clk_rate_exclusive_put(struct clk *clk) {}
295 | |
296 | #endif |
297 | |
298 | #ifdef CONFIG_HAVE_CLK_PREPARE |
299 | /** |
300 | * clk_prepare - prepare a clock source |
301 | * @clk: clock source |
302 | * |
303 | * This prepares the clock source for use. |
304 | * |
305 | * Must not be called from within atomic context. |
306 | */ |
307 | int clk_prepare(struct clk *clk); |
308 | int __must_check clk_bulk_prepare(int num_clks, |
309 | const struct clk_bulk_data *clks); |
310 | |
311 | /** |
312 | * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. |
313 | * @clk: clock source |
314 | * |
315 | * Returns true if clk_prepare() implicitly enables the clock, effectively |
316 | * making clk_enable()/clk_disable() no-ops, false otherwise. |
317 | * |
318 | * This is of interest mainly to the power management code where actually |
319 | * disabling the clock also requires unpreparing it to have any material |
320 | * effect. |
321 | * |
322 | * Regardless of the value returned here, the caller must always invoke |
323 | * clk_enable() or clk_prepare_enable() and counterparts for usage counts |
324 | * to be right. |
325 | */ |
326 | bool clk_is_enabled_when_prepared(struct clk *clk); |
327 | #else |
static inline int clk_prepare(struct clk *clk)
{
	/*
	 * No prepare hardware hooks in this config; keep the might_sleep()
	 * check so callers violating the "no atomic context" rule of the
	 * real clk_prepare() are still caught.
	 */
	might_sleep();
	return 0;
}
333 | |
static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	/* Stub: preserve the may-sleep contract of the real implementation */
	might_sleep();
	return 0;
}
340 | |
static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	/* Without prepare support, preparing never implies enabling */
	return false;
}
345 | #endif |
346 | |
347 | /** |
348 | * clk_unprepare - undo preparation of a clock source |
349 | * @clk: clock source |
350 | * |
351 | * This undoes a previously prepared clock. The caller must balance |
352 | * the number of prepare and unprepare calls. |
353 | * |
354 | * Must not be called from within atomic context. |
355 | */ |
356 | #ifdef CONFIG_HAVE_CLK_PREPARE |
357 | void clk_unprepare(struct clk *clk); |
358 | void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); |
359 | #else |
static inline void clk_unprepare(struct clk *clk)
{
	/* Stub: preserve the may-sleep contract of the real implementation */
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	/* Stub: preserve the may-sleep contract of the real implementation */
	might_sleep();
}
369 | #endif |
370 | |
371 | #ifdef CONFIG_HAVE_CLK |
372 | /** |
373 | * clk_get - lookup and obtain a reference to a clock producer. |
374 | * @dev: device for clock "consumer" |
375 | * @id: clock consumer ID |
376 | * |
377 | * Returns a struct clk corresponding to the clock producer, or |
378 | * valid IS_ERR() condition containing errno. The implementation |
379 | * uses @dev and @id to determine the clock consumer, and thereby |
380 | * the clock producer. (IOW, @id may be identical strings, but |
381 | * clk_get may return different clock producers depending on @dev.) |
382 | * |
383 | * Drivers must assume that the clock source is not enabled. |
384 | * |
385 | * clk_get should not be called from within interrupt context. |
386 | */ |
387 | struct clk *clk_get(struct device *dev, const char *id); |
388 | |
389 | /** |
390 | * clk_bulk_get - lookup and obtain a number of references to clock producer. |
391 | * @dev: device for clock "consumer" |
392 | * @num_clks: the number of clk_bulk_data |
393 | * @clks: the clk_bulk_data table of consumer |
394 | * |
395 | * This helper function allows drivers to get several clk consumers in one |
396 | * operation. If any of the clk cannot be acquired then any clks |
397 | * that were obtained will be freed before returning to the caller. |
398 | * |
399 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained |
400 | * successfully, or valid IS_ERR() condition containing errno. |
401 | * The implementation uses @dev and @clk_bulk_data.id to determine the |
402 | * clock consumer, and thereby the clock producer. |
403 | * The clock returned is stored in each @clk_bulk_data.clk field. |
404 | * |
405 | * Drivers must assume that the clock source is not enabled. |
406 | * |
407 | * clk_bulk_get should not be called from within interrupt context. |
408 | */ |
409 | int __must_check clk_bulk_get(struct device *dev, int num_clks, |
410 | struct clk_bulk_data *clks); |
411 | /** |
412 | * clk_bulk_get_all - lookup and obtain all available references to clock |
413 | * producer. |
414 | * @dev: device for clock "consumer" |
415 | * @clks: pointer to the clk_bulk_data table of consumer |
416 | * |
417 | * This helper function allows drivers to get all clk consumers in one |
418 | * operation. If any of the clk cannot be acquired then any clks |
419 | * that were obtained will be freed before returning to the caller. |
420 | * |
421 | * Returns a positive value for the number of clocks obtained while the |
422 | * clock references are stored in the clk_bulk_data table in @clks field. |
 * Returns 0 if there are none and a negative value if something failed.
424 | * |
425 | * Drivers must assume that the clock source is not enabled. |
426 | * |
 * clk_bulk_get_all should not be called from within interrupt context.
428 | */ |
429 | int __must_check clk_bulk_get_all(struct device *dev, |
430 | struct clk_bulk_data **clks); |
431 | |
432 | /** |
433 | * clk_bulk_get_optional - lookup and obtain a number of references to clock producer |
434 | * @dev: device for clock "consumer" |
435 | * @num_clks: the number of clk_bulk_data |
436 | * @clks: the clk_bulk_data table of consumer |
437 | * |
438 | * Behaves the same as clk_bulk_get() except where there is no clock producer. |
439 | * In this case, instead of returning -ENOENT, the function returns 0 and |
440 | * NULL for a clk for which a clock producer could not be determined. |
441 | */ |
442 | int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, |
443 | struct clk_bulk_data *clks); |
444 | /** |
445 | * devm_clk_bulk_get - managed get multiple clk consumers |
446 | * @dev: device for clock "consumer" |
447 | * @num_clks: the number of clk_bulk_data |
448 | * @clks: the clk_bulk_data table of consumer |
449 | * |
450 | * Return 0 on success, an errno on failure. |
451 | * |
452 | * This helper function allows drivers to get several clk |
453 | * consumers in one operation with management, the clks will |
454 | * automatically be freed when the device is unbound. |
455 | */ |
456 | int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, |
457 | struct clk_bulk_data *clks); |
458 | /** |
459 | * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks |
460 | * @dev: device for clock "consumer" |
461 | * @num_clks: the number of clk_bulk_data |
462 | * @clks: pointer to the clk_bulk_data table of consumer |
463 | * |
464 | * Behaves the same as devm_clk_bulk_get() except where there is no clock |
465 | * producer. In this case, instead of returning -ENOENT, the function returns |
466 | * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. |
467 | * |
468 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained |
469 | * successfully or for any clk there was no clk provider available, otherwise |
470 | * returns valid IS_ERR() condition containing errno. |
471 | * The implementation uses @dev and @clk_bulk_data.id to determine the |
472 | * clock consumer, and thereby the clock producer. |
473 | * The clock returned is stored in each @clk_bulk_data.clk field. |
474 | * |
475 | * Drivers must assume that the clock source is not enabled. |
476 | * |
 * devm_clk_bulk_get_optional should not be called from within interrupt context.
478 | */ |
479 | int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, |
480 | struct clk_bulk_data *clks); |
481 | /** |
482 | * devm_clk_bulk_get_all - managed get multiple clk consumers |
483 | * @dev: device for clock "consumer" |
484 | * @clks: pointer to the clk_bulk_data table of consumer |
485 | * |
486 | * Returns a positive value for the number of clocks obtained while the |
487 | * clock references are stored in the clk_bulk_data table in @clks field. |
 * Returns 0 if there are none and a negative value if something failed.
489 | * |
490 | * This helper function allows drivers to get several clk |
491 | * consumers in one operation with management, the clks will |
492 | * automatically be freed when the device is unbound. |
493 | */ |
494 | |
495 | int __must_check devm_clk_bulk_get_all(struct device *dev, |
496 | struct clk_bulk_data **clks); |
497 | |
498 | /** |
499 | * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed) |
500 | * @dev: device for clock "consumer" |
501 | * @clks: pointer to the clk_bulk_data table of consumer |
502 | * |
503 | * Returns success (0) or negative errno. |
504 | * |
505 | * This helper function allows drivers to get all clocks of the |
506 | * consumer and enables them in one operation with management. |
507 | * The clks will automatically be disabled and freed when the device |
508 | * is unbound. |
509 | */ |
510 | |
511 | int __must_check devm_clk_bulk_get_all_enable(struct device *dev, |
512 | struct clk_bulk_data **clks); |
513 | |
514 | /** |
515 | * devm_clk_get - lookup and obtain a managed reference to a clock producer. |
516 | * @dev: device for clock "consumer" |
517 | * @id: clock consumer ID |
518 | * |
519 | * Context: May sleep. |
520 | * |
521 | * Return: a struct clk corresponding to the clock producer, or |
522 | * valid IS_ERR() condition containing errno. The implementation |
523 | * uses @dev and @id to determine the clock consumer, and thereby |
524 | * the clock producer. (IOW, @id may be identical strings, but |
525 | * clk_get may return different clock producers depending on @dev.) |
526 | * |
527 | * Drivers must assume that the clock source is neither prepared nor |
528 | * enabled. |
529 | * |
530 | * The clock will automatically be freed when the device is unbound |
531 | * from the bus. |
532 | */ |
533 | struct clk *devm_clk_get(struct device *dev, const char *id); |
534 | |
535 | /** |
536 | * devm_clk_get_prepared - devm_clk_get() + clk_prepare() |
537 | * @dev: device for clock "consumer" |
538 | * @id: clock consumer ID |
539 | * |
540 | * Context: May sleep. |
541 | * |
542 | * Return: a struct clk corresponding to the clock producer, or |
543 | * valid IS_ERR() condition containing errno. The implementation |
544 | * uses @dev and @id to determine the clock consumer, and thereby |
545 | * the clock producer. (IOW, @id may be identical strings, but |
546 | * clk_get may return different clock producers depending on @dev.) |
547 | * |
548 | * The returned clk (if valid) is prepared. Drivers must however assume |
549 | * that the clock is not enabled. |
550 | * |
551 | * The clock will automatically be unprepared and freed when the device |
552 | * is unbound from the bus. |
553 | */ |
554 | struct clk *devm_clk_get_prepared(struct device *dev, const char *id); |
555 | |
556 | /** |
557 | * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() |
558 | * @dev: device for clock "consumer" |
559 | * @id: clock consumer ID |
560 | * |
561 | * Context: May sleep. |
562 | * |
563 | * Return: a struct clk corresponding to the clock producer, or |
564 | * valid IS_ERR() condition containing errno. The implementation |
565 | * uses @dev and @id to determine the clock consumer, and thereby |
566 | * the clock producer. (IOW, @id may be identical strings, but |
567 | * clk_get may return different clock producers depending on @dev.) |
568 | * |
569 | * The returned clk (if valid) is prepared and enabled. |
570 | * |
571 | * The clock will automatically be disabled, unprepared and freed |
572 | * when the device is unbound from the bus. |
573 | */ |
574 | struct clk *devm_clk_get_enabled(struct device *dev, const char *id); |
575 | |
576 | /** |
577 | * devm_clk_get_optional - lookup and obtain a managed reference to an optional |
578 | * clock producer. |
579 | * @dev: device for clock "consumer" |
580 | * @id: clock consumer ID |
581 | * |
582 | * Context: May sleep. |
583 | * |
584 | * Return: a struct clk corresponding to the clock producer, or |
585 | * valid IS_ERR() condition containing errno. The implementation |
586 | * uses @dev and @id to determine the clock consumer, and thereby |
587 | * the clock producer. If no such clk is found, it returns NULL |
588 | * which serves as a dummy clk. That's the only difference compared |
589 | * to devm_clk_get(). |
590 | * |
591 | * Drivers must assume that the clock source is neither prepared nor |
592 | * enabled. |
593 | * |
594 | * The clock will automatically be freed when the device is unbound |
595 | * from the bus. |
596 | */ |
597 | struct clk *devm_clk_get_optional(struct device *dev, const char *id); |
598 | |
599 | /** |
600 | * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() |
601 | * @dev: device for clock "consumer" |
602 | * @id: clock consumer ID |
603 | * |
604 | * Context: May sleep. |
605 | * |
606 | * Return: a struct clk corresponding to the clock producer, or |
607 | * valid IS_ERR() condition containing errno. The implementation |
608 | * uses @dev and @id to determine the clock consumer, and thereby |
609 | * the clock producer. If no such clk is found, it returns NULL |
610 | * which serves as a dummy clk. That's the only difference compared |
611 | * to devm_clk_get_prepared(). |
612 | * |
613 | * The returned clk (if valid) is prepared. Drivers must however |
614 | * assume that the clock is not enabled. |
615 | * |
616 | * The clock will automatically be unprepared and freed when the |
617 | * device is unbound from the bus. |
618 | */ |
619 | struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); |
620 | |
621 | /** |
622 | * devm_clk_get_optional_enabled - devm_clk_get_optional() + |
623 | * clk_prepare_enable() |
624 | * @dev: device for clock "consumer" |
625 | * @id: clock consumer ID |
626 | * |
627 | * Context: May sleep. |
628 | * |
629 | * Return: a struct clk corresponding to the clock producer, or |
630 | * valid IS_ERR() condition containing errno. The implementation |
631 | * uses @dev and @id to determine the clock consumer, and thereby |
632 | * the clock producer. If no such clk is found, it returns NULL |
633 | * which serves as a dummy clk. That's the only difference compared |
634 | * to devm_clk_get_enabled(). |
635 | * |
636 | * The returned clk (if valid) is prepared and enabled. |
637 | * |
638 | * The clock will automatically be disabled, unprepared and freed |
639 | * when the device is unbound from the bus. |
640 | */ |
641 | struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); |
642 | |
643 | /** |
644 | * devm_get_clk_from_child - lookup and obtain a managed reference to a |
645 | * clock producer from child node. |
646 | * @dev: device for clock "consumer" |
647 | * @np: pointer to clock consumer node |
648 | * @con_id: clock consumer ID |
649 | * |
650 | * This function parses the clocks, and uses them to look up the |
651 | * struct clk from the registered list of clock providers by using |
652 | * @np and @con_id |
653 | * |
654 | * The clock will automatically be freed when the device is unbound |
655 | * from the bus. |
656 | */ |
657 | struct clk *devm_get_clk_from_child(struct device *dev, |
658 | struct device_node *np, const char *con_id); |
659 | |
660 | /** |
661 | * clk_enable - inform the system when the clock source should be running. |
662 | * @clk: clock source |
663 | * |
664 | * If the clock can not be enabled/disabled, this should return success. |
665 | * |
666 | * May be called from atomic contexts. |
667 | * |
668 | * Returns success (0) or negative errno. |
669 | */ |
670 | int clk_enable(struct clk *clk); |
671 | |
672 | /** |
673 | * clk_bulk_enable - inform the system when the set of clks should be running. |
674 | * @num_clks: the number of clk_bulk_data |
675 | * @clks: the clk_bulk_data table of consumer |
676 | * |
677 | * May be called from atomic contexts. |
678 | * |
679 | * Returns success (0) or negative errno. |
680 | */ |
681 | int __must_check clk_bulk_enable(int num_clks, |
682 | const struct clk_bulk_data *clks); |
683 | |
684 | /** |
685 | * clk_disable - inform the system when the clock source is no longer required. |
686 | * @clk: clock source |
687 | * |
688 | * Inform the system that a clock source is no longer required by |
689 | * a driver and may be shut down. |
690 | * |
691 | * May be called from atomic contexts. |
692 | * |
693 | * Implementation detail: if the clock source is shared between |
694 | * multiple drivers, clk_enable() calls must be balanced by the |
695 | * same number of clk_disable() calls for the clock source to be |
696 | * disabled. |
697 | */ |
698 | void clk_disable(struct clk *clk); |
699 | |
700 | /** |
701 | * clk_bulk_disable - inform the system when the set of clks is no |
702 | * longer required. |
703 | * @num_clks: the number of clk_bulk_data |
704 | * @clks: the clk_bulk_data table of consumer |
705 | * |
706 | * Inform the system that a set of clks is no longer required by |
707 | * a driver and may be shut down. |
708 | * |
709 | * May be called from atomic contexts. |
710 | * |
711 | * Implementation detail: if the set of clks is shared between |
712 | * multiple drivers, clk_bulk_enable() calls must be balanced by the |
713 | * same number of clk_bulk_disable() calls for the clock source to be |
714 | * disabled. |
715 | */ |
716 | void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); |
717 | |
718 | /** |
719 | * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. |
720 | * This is only valid once the clock source has been enabled. |
721 | * @clk: clock source |
722 | */ |
723 | unsigned long clk_get_rate(struct clk *clk); |
724 | |
725 | /** |
726 | * clk_put - "free" the clock source |
727 | * @clk: clock source |
728 | * |
729 | * Note: drivers must ensure that all clk_enable calls made on this |
730 | * clock source are balanced by clk_disable calls prior to calling |
731 | * this function. |
732 | * |
733 | * clk_put should not be called from within interrupt context. |
734 | */ |
735 | void clk_put(struct clk *clk); |
736 | |
737 | /** |
738 | * clk_bulk_put - "free" the clock source |
739 | * @num_clks: the number of clk_bulk_data |
740 | * @clks: the clk_bulk_data table of consumer |
741 | * |
742 | * Note: drivers must ensure that all clk_bulk_enable calls made on this |
743 | * clock source are balanced by clk_bulk_disable calls prior to calling |
744 | * this function. |
745 | * |
746 | * clk_bulk_put should not be called from within interrupt context. |
747 | */ |
748 | void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); |
749 | |
750 | /** |
751 | * clk_bulk_put_all - "free" all the clock source |
752 | * @num_clks: the number of clk_bulk_data |
753 | * @clks: the clk_bulk_data table of consumer |
754 | * |
755 | * Note: drivers must ensure that all clk_bulk_enable calls made on this |
756 | * clock source are balanced by clk_bulk_disable calls prior to calling |
757 | * this function. |
758 | * |
759 | * clk_bulk_put_all should not be called from within interrupt context. |
760 | */ |
761 | void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); |
762 | |
763 | /** |
764 | * devm_clk_put - "free" a managed clock source |
765 | * @dev: device used to acquire the clock |
766 | * @clk: clock source acquired with devm_clk_get() |
767 | * |
768 | * Note: drivers must ensure that all clk_enable calls made on this |
769 | * clock source are balanced by clk_disable calls prior to calling |
770 | * this function. |
771 | * |
772 | * clk_put should not be called from within interrupt context. |
773 | */ |
774 | void devm_clk_put(struct device *dev, struct clk *clk); |
775 | |
776 | /* |
777 | * The remaining APIs are optional for machine class support. |
778 | */ |
779 | |
780 | |
781 | /** |
782 | * clk_round_rate - adjust a rate to the exact rate a clock can provide |
783 | * @clk: clock source |
784 | * @rate: desired clock rate in Hz |
785 | * |
786 | * This answers the question "if I were to pass @rate to clk_set_rate(), |
787 | * what clock rate would I end up with?" without changing the hardware |
788 | * in any way. In other words: |
789 | * |
790 | * rate = clk_round_rate(clk, r); |
791 | * |
792 | * and: |
793 | * |
794 | * clk_set_rate(clk, r); |
795 | * rate = clk_get_rate(clk); |
796 | * |
797 | * are equivalent except the former does not modify the clock hardware |
798 | * in any way. |
799 | * |
800 | * Returns rounded clock rate in Hz, or negative errno. |
801 | */ |
802 | long clk_round_rate(struct clk *clk, unsigned long rate); |
803 | |
804 | /** |
805 | * clk_set_rate - set the clock rate for a clock source |
806 | * @clk: clock source |
807 | * @rate: desired clock rate in Hz |
808 | * |
809 | * Updating the rate starts at the top-most affected clock and then |
810 | * walks the tree down to the bottom-most clock that needs updating. |
811 | * |
812 | * Returns success (0) or negative errno. |
813 | */ |
814 | int clk_set_rate(struct clk *clk, unsigned long rate); |
815 | |
816 | /** |
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
818 | * clock source |
819 | * @clk: clock source |
820 | * @rate: desired clock rate in Hz |
821 | * |
822 | * This helper function allows drivers to atomically set the rate of a producer |
823 | * and claim exclusivity over the rate control of the producer. |
824 | * |
825 | * It is essentially a combination of clk_set_rate() and |
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
828 | * |
829 | * Returns success (0) or negative errno. |
830 | */ |
831 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); |
832 | |
833 | /** |
834 | * clk_has_parent - check if a clock is a possible parent for another |
835 | * @clk: clock source |
836 | * @parent: parent clock source |
837 | * |
838 | * This function can be used in drivers that need to check that a clock can be |
839 | * the parent of another without actually changing the parent. |
840 | * |
841 | * Returns true if @parent is a possible parent for @clk, false otherwise. |
842 | */ |
843 | bool clk_has_parent(const struct clk *clk, const struct clk *parent); |
844 | |
845 | /** |
846 | * clk_set_rate_range - set a rate range for a clock source |
847 | * @clk: clock source |
848 | * @min: desired minimum clock rate in Hz, inclusive |
849 | * @max: desired maximum clock rate in Hz, inclusive |
850 | * |
851 | * Returns success (0) or negative errno. |
852 | */ |
853 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); |
854 | |
855 | /** |
856 | * clk_set_min_rate - set a minimum clock rate for a clock source |
857 | * @clk: clock source |
858 | * @rate: desired minimum clock rate in Hz, inclusive |
859 | * |
860 | * Returns success (0) or negative errno. |
861 | */ |
862 | int clk_set_min_rate(struct clk *clk, unsigned long rate); |
863 | |
864 | /** |
865 | * clk_set_max_rate - set a maximum clock rate for a clock source |
866 | * @clk: clock source |
867 | * @rate: desired maximum clock rate in Hz, inclusive |
868 | * |
869 | * Returns success (0) or negative errno. |
870 | */ |
871 | int clk_set_max_rate(struct clk *clk, unsigned long rate); |
872 | |
873 | /** |
874 | * clk_set_parent - set the parent clock source for this clock |
875 | * @clk: clock source |
876 | * @parent: parent clock source |
877 | * |
878 | * Returns success (0) or negative errno. |
879 | */ |
880 | int clk_set_parent(struct clk *clk, struct clk *parent); |
881 | |
882 | /** |
883 | * clk_get_parent - get the parent clock source for this clock |
884 | * @clk: clock source |
885 | * |
886 | * Returns struct clk corresponding to parent clock source, or |
887 | * valid IS_ERR() condition containing errno. |
888 | */ |
889 | struct clk *clk_get_parent(struct clk *clk); |
890 | |
891 | /** |
892 | * clk_get_sys - get a clock based upon the device name |
893 | * @dev_id: device name |
894 | * @con_id: connection ID |
895 | * |
896 | * Returns a struct clk corresponding to the clock producer, or |
897 | * valid IS_ERR() condition containing errno. The implementation |
898 | * uses @dev_id and @con_id to determine the clock consumer, and |
899 | * thereby the clock producer. In contrast to clk_get() this function |
900 | * takes the device name instead of the device itself for identification. |
901 | * |
902 | * Drivers must assume that the clock source is not enabled. |
903 | * |
904 | * clk_get_sys should not be called from within interrupt context. |
905 | */ |
906 | struct clk *clk_get_sys(const char *dev_id, const char *con_id); |
907 | |
908 | /** |
909 | * clk_save_context - save clock context for poweroff |
910 | * |
911 | * Saves the context of the clock register for powerstates in which the |
912 | * contents of the registers will be lost. Occurs deep within the suspend |
913 | * code so locking is not necessary. |
914 | */ |
915 | int clk_save_context(void); |
916 | |
917 | /** |
918 | * clk_restore_context - restore clock context after poweroff |
919 | * |
920 | * This occurs with all clocks enabled. Occurs deep within the resume code |
921 | * so locking is not necessary. |
922 | */ |
923 | void clk_restore_context(void); |
924 | |
925 | #else /* !CONFIG_HAVE_CLK */ |
926 | |
/* !CONFIG_HAVE_CLK stub: no clock framework, report "no clock" via NULL. */
static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}
931 | |
/* !CONFIG_HAVE_CLK stub: nothing to look up, always succeeds. */
static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}
937 | |
/* !CONFIG_HAVE_CLK stub: nothing to look up, always succeeds. */
static inline int __must_check clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}
943 | |
/* !CONFIG_HAVE_CLK stub: no clocks to gather, always succeeds. */
static inline int __must_check clk_bulk_get_all(struct device *dev,
						 struct clk_bulk_data **clks)
{
	return 0;
}
949 | |
/* !CONFIG_HAVE_CLK stub: managed lookup, report "no clock" via NULL. */
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}
954 | |
/* !CONFIG_HAVE_CLK stub: nothing to prepare, report "no clock" via NULL. */
static inline struct clk *devm_clk_get_prepared(struct device *dev,
						const char *id)
{
	return NULL;
}
960 | |
/* !CONFIG_HAVE_CLK stub: nothing to enable, report "no clock" via NULL. */
static inline struct clk *devm_clk_get_enabled(struct device *dev,
					       const char *id)
{
	return NULL;
}
966 | |
/* !CONFIG_HAVE_CLK stub: optional clock absent, report it via NULL. */
static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}
972 | |
/* !CONFIG_HAVE_CLK stub: optional clock absent, report it via NULL. */
static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
							 const char *id)
{
	return NULL;
}
978 | |
/* !CONFIG_HAVE_CLK stub: optional clock absent, report it via NULL. */
static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
							const char *id)
{
	return NULL;
}
984 | |
/* !CONFIG_HAVE_CLK stub: managed bulk lookup, always succeeds. */
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}
990 | |
/* !CONFIG_HAVE_CLK stub: managed bulk lookup, always succeeds. */
static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
					int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}
996 | |
997 | static inline int __must_check devm_clk_bulk_get_all(struct device *dev, |
998 | struct clk_bulk_data **clks) |
999 | { |
1000 | |
1001 | return 0; |
1002 | } |
1003 | |
/* !CONFIG_HAVE_CLK stub: nothing to get or enable, always succeeds. */
static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}
1009 | |
/* !CONFIG_HAVE_CLK stub: child-node lookup, report "no clock" via NULL. */
static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}
1015 | |
1016 | static inline void clk_put(struct clk *clk) {} |
1017 | |
1018 | static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} |
1019 | |
1020 | static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} |
1021 | |
1022 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
1023 | |
/* !CONFIG_HAVE_CLK stub: no gate to open, report success. */
static inline int clk_enable(struct clk *clk)
{
	return 0;
}
1028 | |
/* !CONFIG_HAVE_CLK stub: no gates to open, report success. */
static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}
1034 | |
1035 | static inline void clk_disable(struct clk *clk) {} |
1036 | |
1037 | |
/* !CONFIG_HAVE_CLK stub: no gates to close, no-op. */
static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}
1040 | |
/* !CONFIG_HAVE_CLK stub: no hardware rate to report, return 0 Hz. */
static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}
1045 | |
/* !CONFIG_HAVE_CLK stub: nothing to reprogram, report success. */
static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1050 | |
/* !CONFIG_HAVE_CLK stub: nothing to reprogram or claim, report success. */
static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}
1055 | |
/* !CONFIG_HAVE_CLK stub: no hardware to round against, return 0 Hz. */
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1060 | |
/*
 * !CONFIG_HAVE_CLK stub: optimistically report any parent as possible.
 * Parameters are const-qualified to match the CONFIG_HAVE_CLK prototype
 * of clk_has_parent() declared above.
 */
static inline bool clk_has_parent(const struct clk *clk,
				  const struct clk *parent)
{
	return true;
}
1065 | |
/* !CONFIG_HAVE_CLK stub: no range to apply, report success. */
static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}
1071 | |
/* !CONFIG_HAVE_CLK stub: no minimum to apply, report success. */
static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1076 | |
/* !CONFIG_HAVE_CLK stub: no maximum to apply, report success. */
static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
1081 | |
/* !CONFIG_HAVE_CLK stub: no mux to switch, report success. */
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}
1086 | |
/* !CONFIG_HAVE_CLK stub: no topology, report "no parent" via NULL. */
static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}
1091 | |
/* !CONFIG_HAVE_CLK stub: name-based lookup, report "no clock" via NULL. */
static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}
1096 | |
/* !CONFIG_HAVE_CLK stub: no clock registers to save, report success. */
static inline int clk_save_context(void)
{
	return 0;
}
1101 | |
1102 | static inline void clk_restore_context(void) {} |
1103 | |
1104 | #endif |
1105 | |
/**
 * clk_prepare_enable - prepare and then enable a clock in one call
 * @clk: clock source
 *
 * Helper for callers of clk_enable that run in non-atomic context: the
 * clock is prepared first, then enabled; if the enable step fails the
 * prepare is rolled back so the prepare/unprepare count stays balanced.
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);	/* undo the prepare on enable failure */

	return ret;
}
1120 | |
/**
 * clk_disable_unprepare - disable and then unprepare a clock in one call
 * @clk: clock source
 *
 * Helper for callers of clk_disable that run in non-atomic context;
 * counterpart of clk_prepare_enable(). Disable must precede unprepare.
 */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}
1127 | |
/**
 * clk_bulk_prepare_enable - prepare and then enable multiple clocks
 * @num_clks: the number of entries in @clks
 * @clks: the clk_bulk_data table of consumers
 *
 * Bulk counterpart of clk_prepare_enable(): all clocks are prepared
 * first, then enabled; if the enable step fails they are unprepared
 * again so the prepare/unprepare count stays balanced.
 *
 * Returns success (0) or negative errno.
 */
static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);	/* roll back on failure */

	return ret;
}
1142 | |
/**
 * clk_bulk_disable_unprepare - disable and then unprepare multiple clocks
 * @num_clks: the number of entries in @clks
 * @clks: the clk_bulk_data table of consumers
 *
 * Bulk counterpart of clk_disable_unprepare(); disable must precede
 * unprepare.
 */
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}
1149 | |
1150 | /** |
1151 | * clk_drop_range - Reset any range set on that clock |
1152 | * @clk: clock source |
1153 | * |
1154 | * Returns success (0) or negative errno. |
1155 | */ |
1156 | static inline int clk_drop_range(struct clk *clk) |
1157 | { |
1158 | return clk_set_rate_range(clk, min: 0, ULONG_MAX); |
1159 | } |
1160 | |
1161 | /** |
1162 | * clk_get_optional - lookup and obtain a reference to an optional clock |
1163 | * producer. |
1164 | * @dev: device for clock "consumer" |
1165 | * @id: clock consumer ID |
1166 | * |
1167 | * Behaves the same as clk_get() except where there is no clock producer. In |
1168 | * this case, instead of returning -ENOENT, the function returns NULL. |
1169 | */ |
1170 | static inline struct clk *clk_get_optional(struct device *dev, const char *id) |
1171 | { |
1172 | struct clk *clk = clk_get(dev, id); |
1173 | |
1174 | if (clk == ERR_PTR(error: -ENOENT)) |
1175 | return NULL; |
1176 | |
1177 | return clk; |
1178 | } |
1179 | |
1180 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) |
1181 | struct clk *of_clk_get(struct device_node *np, int index); |
1182 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); |
1183 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); |
1184 | #else |
/* Stub when OF/common-clk support is disabled: lookup always fails. */
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
/* Stub when OF/common-clk support is disabled: lookup always fails. */
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
/* Stub when OF/common-clk support is disabled: lookup always fails. */
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
1198 | #endif |
1199 | |
1200 | #endif |
1201 | |