1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by |
5 | * the Free Software Foundation; either version 2 of the License, or |
6 | * (at your option) any later version. |
7 | * |
8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. |
12 | * |
13 | * Authors: Waiman Long <longman@redhat.com> |
14 | */ |
15 | |
#ifndef LOCK_EVENT
/*
 * Default expansion for the X-macro list below: each LOCK_EVENT(name)
 * entry becomes a LOCKEVENT_<name> enumerator.  The #ifndef guard lets
 * an including file pre-define LOCK_EVENT to expand each entry into a
 * different per-event construct instead (e.g. a counter or name table;
 * exact consumers live outside this file — see lock_events.[ch]).
 */
#define LOCK_EVENT(name) LOCKEVENT_ ## name,
#endif
19 | |
#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 *
 * NOTE(review): list position determines each event's LOCKEVENT_*
 * enumerator value (see the default LOCK_EVENT expansion above), so
 * prefer appending new events over inserting them mid-list.
 */
LOCK_EVENT(pv_hash_hops) /* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock) /* # of vCPU kicks issued at unlock time */
LOCK_EVENT(pv_kick_wake) /* # of vCPU kicks for pv_latency_wake */
LOCK_EVENT(pv_latency_kick) /* Average latency (ns) of vCPU kick */
LOCK_EVENT(pv_latency_wake) /* Average latency (ns) of kick-to-wakeup */
LOCK_EVENT(pv_lock_stealing) /* # of lock stealing operations */
LOCK_EVENT(pv_spurious_wakeup) /* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again) /* # of wait's after queue head vCPU kick */
LOCK_EVENT(pv_wait_early) /* # of early vCPU wait's */
LOCK_EVENT(pv_wait_head) /* # of vCPU wait's at the queue head */
LOCK_EVENT(pv_wait_node) /* # of vCPU wait's at non-head queue node */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1 (slowpath acquisitions that needed only the first
 * per-cpu MCS node — there is no separate counter for that case).
 */
LOCK_EVENT(lock_pending) /* # of locking ops via pending code */
LOCK_EVENT(lock_slowpath) /* # of locking ops via MCS lock queue */
LOCK_EVENT(lock_use_node2) /* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3) /* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4) /* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node) /* # of locking ops w/o using percpu node */
#endif /* CONFIG_QUEUED_SPINLOCKS */
51 | |
/*
 * Locking events for rwsem
 *
 * These are always compiled in (unlike the qspinlock events above,
 * which depend on CONFIG_QUEUED_SPINLOCKS).  As with every section of
 * this list, entry order fixes the generated LOCKEVENT_* values.
 */
LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps */
LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps */
LOCK_EVENT(rwsem_wake_reader) /* # of reader wakeups */
LOCK_EVENT(rwsem_wake_writer) /* # of writer wakeups */
LOCK_EVENT(rwsem_opt_lock) /* # of opt-acquired write locks */
LOCK_EVENT(rwsem_opt_fail) /* # of failed optspins */
LOCK_EVENT(rwsem_opt_nospin) /* # of disabled optspins */
LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */
LOCK_EVENT(rwsem_rlock_steal) /* # of read locks by lock stealing */
LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */
LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */
LOCK_EVENT(rwsem_rlock_handoff) /* # of read lock handoffs */
LOCK_EVENT(rwsem_wlock) /* # of write locks acquired */
LOCK_EVENT(rwsem_wlock_fail) /* # of failed write lock acquisitions */
LOCK_EVENT(rwsem_wlock_handoff) /* # of write lock handoffs */
70 | |