// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US       100

/* radix tree tags */
#define HWSPINLOCK_UNUSED       (0) /* tags an hwspinlock as unused */
/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize the
 * routine that takes the hardware lock, using a mutex or a spinlock. This
 * mode exists because some users need to perform time-consuming or sleepable
 * operations under the hardware lock, and therefore need a sleepable lock
 * (like a mutex) to protect them.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
                return -EINVAL;

        /*
         * This spin_lock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        switch (mode) {
        case HWLOCK_IRQSTATE:
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
                break;
        case HWLOCK_IRQ:
                ret = spin_trylock_irq(&hwlock->lock);
                break;
        case HWLOCK_RAW:
        case HWLOCK_IN_ATOMIC:
                ret = 1;
                break;
        default:
                ret = spin_trylock(&hwlock->lock);
                break;
        }

        /* is lock already taken by another context on the local cpu ? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->bank->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                switch (mode) {
                case HWLOCK_IRQSTATE:
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                        break;
                case HWLOCK_IRQ:
                        spin_unlock_irq(&hwlock->lock);
                        break;
                case HWLOCK_RAW:
                case HWLOCK_IN_ATOMIC:
                        /* Nothing to do */
                        break;
                default:
                        spin_unlock(&hwlock->lock);
                        break;
                }

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually took the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
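
/*
 * A minimal usage sketch (illustrative only, not part of the framework):
 * probing a lock from process context via the hwspin_trylock_irqsave()
 * wrapper from <linux/hwspinlock.h>, which funnels into __hwspin_trylock()
 * above. The function name and the dynamic allocation are assumptions of
 * this example.
 */
static int __maybe_unused example_trylock_usage(void)
{
        struct hwspinlock *hwlock;
        unsigned long flags;
        int ret;

        hwlock = hwspin_lock_request();  /* dynamically allocate a lock */
        if (!hwlock)
                return -EBUSY;

        /* HWLOCK_IRQSTATE: irqs disabled, previous state saved in flags */
        ret = hwspin_trylock_irqsave(hwlock, &flags);
        if (!ret) {
                /* ... touch shared state; keep this window short ... */
                hwspin_unlock_irqrestore(hwlock, &flags);
        }

        hwspin_lock_free(hwlock);
        return ret;     /* 0 on success, -EBUSY if the lock was taken */
}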

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: If the mode is HWLOCK_RAW, the caller must itself serialize the
 * routine that takes the hardware lock, using a mutex or a spinlock. This
 * mode exists because some users need to perform time-consuming or sleepable
 * operations under the hardware lock, and therefore need a sleepable lock
 * (like a mutex) to protect them.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, and hence should not exceed a few
 * msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                          int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire, atomic_delay = 0;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (mode == HWLOCK_IN_ATOMIC) {
                        udelay(HWSPINLOCK_RETRY_DELAY_US);
                        atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
                        if (atomic_delay > to * 1000)
                                return -ETIMEDOUT;
                } else {
                        if (time_is_before_eq_jiffies(expire))
                                return -ETIMEDOUT;
                }

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->bank->ops->relax)
                        hwlock->bank->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
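
/*
 * A minimal sketch (illustrative only) of the HWLOCK_IN_ATOMIC path above:
 * the timeout is served by udelay() in HWSPINLOCK_RETRY_DELAY_US steps, so
 * it must stay small. The 10 msecs budget and the function name are
 * assumptions of this example.
 */
static void __maybe_unused example_atomic_timeout(struct hwspinlock *hwlock)
{
        int ret;

        /* busy-waits for up to 10 msecs; never sleeps */
        ret = hwspin_lock_timeout_in_atomic(hwlock, 10);
        if (ret)        /* most notably -ETIMEDOUT */
                return;

        /* ... short critical section ... */

        hwspin_unlock_in_atomic(hwlock);
}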

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
                return;

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it will
         * take the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->bank->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        switch (mode) {
        case HWLOCK_IRQSTATE:
                spin_unlock_irqrestore(&hwlock->lock, *flags);
                break;
        case HWLOCK_IRQ:
                spin_unlock_irq(&hwlock->lock);
                break;
        case HWLOCK_RAW:
        case HWLOCK_IN_ATOMIC:
                /* Nothing to do */
                break;
        default:
                spin_unlock(&hwlock->lock);
                break;
        }
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
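
/*
 * A minimal sketch (illustrative only) of the HWLOCK_RAW pattern the
 * "Caution" notes above describe: the core takes no local spinlock in this
 * mode, so the caller serializes local contexts itself with a sleepable
 * lock. "example_raw_mutex" and the 100 msecs timeout are assumptions of
 * this example.
 */
static DEFINE_MUTEX(example_raw_mutex);

static int __maybe_unused example_raw_usage(struct hwspinlock *hwlock)
{
        int ret;

        mutex_lock(&example_raw_mutex); /* local serialization, sleepable */

        ret = hwspin_lock_timeout_raw(hwlock, 100);
        if (!ret) {
                /* sleepable/time-consuming work is permitted in RAW mode */
                hwspin_unlock_raw(hwlock);
        }

        mutex_unlock(&example_raw_mutex);
        return ret;
}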

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
        if (WARN_ON(hwlock_spec->args_count != 1))
                return -EINVAL;

        return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        struct of_phandle_args args;
        struct hwspinlock *hwlock;
        struct radix_tree_iter iter;
        void **slot;
        int id;
        int ret;

        ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells",
                                         index, &args);
        if (ret)
                return ret;

        if (!of_device_is_available(args.np)) {
                ret = -ENOENT;
                goto out;
        }

        /* Find the hwspinlock device: we need its base_id */
        ret = -EPROBE_DEFER;
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
                hwlock = radix_tree_deref_slot(slot);
                if (unlikely(!hwlock))
                        continue;
                if (radix_tree_deref_retry(hwlock)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (device_match_of_node(hwlock->bank->dev, args.np)) {
                        ret = 0;
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out;

        id = of_hwspin_lock_simple_xlate(&args);
        if (id < 0 || id >= hwlock->bank->num_locks) {
                ret = -EINVAL;
                goto out;
        }
        id += hwlock->bank->base_id;

out:
        of_node_put(args.np);
        return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
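
/*
 * An illustrative DT consumer sketch (not part of the framework). The DTS
 * fragment and all names in it are assumptions of this example:
 *
 *      hwlock1: hwlock@4a0f6000 {
 *              compatible = "ti,omap4-hwspinlock";
 *              #hwlock-cells = <1>;
 *      };
 *
 *      client {
 *              hwlocks = <&hwlock1 2>;
 *      };
 */
static struct hwspinlock * __maybe_unused
example_get_dt_lock(struct device_node *np)
{
        int id;

        /* translate the first "hwlocks" entry to a global lock id */
        id = of_hwspin_lock_get_id(np, 0);
        if (id < 0)
                return NULL;    /* may be -EPROBE_DEFER: retry from probe */

        return hwspin_lock_request_specific(id);
}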

/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
        int index;

        if (!name)
                return -EINVAL;

        index = of_property_match_string(np, "hwlock-names", name);
        if (index < 0)
                return index;

        return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
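
/*
 * The same lookup by name (illustrative only), assuming the example client
 * above grows a "hwlock-names" property; the "cmd" name is an assumption:
 *
 *      client {
 *              hwlocks = <&hwlock1 2>, <&hwlock1 3>;
 *              hwlock-names = "cmd", "reply";
 *      };
 */
static int __maybe_unused example_get_id_byname(struct device_node *np)
{
        return of_hwspin_lock_get_id_byname(np, "cmd");
}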

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
        struct hwspinlock *tmp;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
        if (ret) {
                if (ret == -EEXIST)
                        pr_err("hwspinlock id %d already exists!\n", id);
                goto out;
        }

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
        struct hwspinlock *hwlock;
        int ret = 0, i;

        if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
            !ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        bank->dev = dev;
        bank->ops = ops;
        bank->base_id = base_id;
        bank->num_locks = num_locks;

        for (i = 0; i < num_locks; i++) {
                hwlock = &bank->lock[i];

                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;

                ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }

        return 0;

reg_failed:
        while (--i >= 0)
                hwspin_lock_unregister_single(base_id + i);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
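
/*
 * An illustrative provider sketch (not part of the framework): registering
 * a bank of 8 locks. The one-32-bit-register-per-lock layout, the ops
 * below and the use of hwlock->priv are assumptions of this example; it
 * additionally relies on <linux/io.h> (readl/writel) and <linux/overflow.h>
 * (struct_size()). Real drivers live elsewhere in drivers/hwspinlock/.
 */
static int example_trylock_op(struct hwspinlock *lock)
{
        /* in this made-up layout, reading 0 means "now acquired" */
        return readl((void __iomem *)lock->priv) == 0;
}

static void example_unlock_op(struct hwspinlock *lock)
{
        writel(0, (void __iomem *)lock->priv);
}

static const struct hwspinlock_ops example_bank_ops = {
        .trylock = example_trylock_op,
        .unlock = example_unlock_op,
};

static int __maybe_unused example_register_bank(struct device *dev,
                                                void __iomem *base)
{
        struct hwspinlock_device *bank;
        int i;

        /* the bank embeds a flexible array of per-lock state */
        bank = devm_kzalloc(dev, struct_size(bank, lock, 8), GFP_KERNEL);
        if (!bank)
                return -ENOMEM;

        for (i = 0; i < 8; i++)
                bank->lock[i].priv = (void *)(base + i * sizeof(u32));

        /* base_id 0: this bank owns global lock ids 0..7 */
        return hwspin_lock_register(bank, dev, &example_bank_ops, 0, 8);
}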

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
        struct hwspinlock *hwlock, *tmp;
        int i;

        for (i = 0; i < bank->num_locks; i++) {
                hwlock = &bank->lock[i];

                tmp = hwspin_lock_unregister_single(bank->base_id + i);
                if (!tmp)
                        return -EBUSY;

                /* self-sanity check that should never fail */
                WARN_ON(tmp != hwlock);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
        hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
                                         void *data)
{
        struct hwspinlock_device **bank = res;

        if (WARN_ON(!bank || !*bank))
                return 0;

        return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *                                 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
                                struct hwspinlock_device *bank)
{
        int ret;

        ret = devres_release(dev, devm_hwspin_lock_unreg,
                             devm_hwspin_lock_device_match, bank);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *                               a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
                              struct hwspinlock_device *bank,
                              const struct hwspinlock_ops *ops,
                              int base_id, int num_locks)
{
        struct hwspinlock_device **ptr;
        int ret;

        ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
        if (!ret) {
                *ptr = bank;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
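
/*
 * The managed counterpart of the registration sketch above (illustrative
 * only): with devm, the matching unregister call in the driver's remove
 * path disappears. "example_bank_ops" and the 8-lock bank are the
 * assumptions carried over from that sketch.
 */
static int __maybe_unused example_devm_register(struct device *dev,
                                                struct hwspinlock_device *bank)
{
        /* unregistered automatically when @dev is unbound */
        return devm_hwspin_lock_register(dev, bank, &example_bank_ops, 0, 8);
}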

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct device *dev = hwlock->bank->dev;
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(dev->driver->owner)) {
                dev_err(dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err(dev, "%s: can't power on device\n", __func__);
                pm_runtime_put_noidle(dev);
                module_put(dev->driver->owner);
                return ret;
        }

        ret = 0;

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                                   HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                         0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
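
/*
 * An illustrative consumer sketch (not part of the framework): dynamically
 * allocating a lock and publishing its id so a remote core can use it too.
 * The hand-off to the remote side is left as a commented placeholder,
 * since the transport is system-specific.
 */
static int __maybe_unused example_dynamic_alloc(void)
{
        struct hwspinlock *hwlock;
        int id;

        hwlock = hwspin_lock_request();
        if (!hwlock)
                return -EBUSY;

        /* the remote core must learn the id before the lock is useful */
        id = hwspin_lock_get_id(hwlock);
        /* example_send_to_remote(id); -- e.g. over a mailbox/rpmsg channel */

        return id;
}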

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock_to_id(hwlock) != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct device *dev;
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        dev = hwlock->bank->dev;
        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                                 HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        pm_runtime_put(dev);

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                                 HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(dev->driver->owner);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
        struct hwspinlock **hwlock = res;

        if (WARN_ON(!hwlock || !*hwlock))
                return 0;

        return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
        hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
        int ret;

        ret = devres_release(dev, devm_hwspin_lock_release,
                             devm_hwspin_lock_match, hwlock);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
        struct hwspinlock **ptr, *hwlock;

        ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        hwlock = hwspin_lock_request();
        if (hwlock) {
                *ptr = hwlock;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *                                       a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id)
{
        struct hwspinlock **ptr, *hwlock;

        ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        hwlock = hwspin_lock_request_specific(id);
        if (hwlock) {
                *ptr = hwlock;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);
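
/*
 * An illustrative consumer sketch (not part of the framework): tying lock
 * lifetime to a device with the managed API. The lock id 2 is an
 * assumption of this example.
 */
static int __maybe_unused example_devm_consumer(struct device *dev)
{
        struct hwspinlock *hwlock;

        /* freed automatically, via hwspin_lock_free(), when @dev unbinds */
        hwlock = devm_hwspin_lock_request_specific(dev, 2);
        if (!hwlock)
                return -EBUSY;

        return 0;
}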

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");