1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_KERNEL_VTIME_H
3#define _LINUX_KERNEL_VTIME_H
4
5#include <linux/context_tracking_state.h>
6#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
7#include <asm/vtime.h>
8#endif
9
10
11struct task_struct;
12
13/*
14 * vtime_accounting_cpu_enabled() definitions/declarations
15 */
16#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
/* Native vtime accounting is compiled in: accounting is always active. */
static inline bool vtime_accounting_cpu_enabled(void)
{
	return true;
}
18#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking: enabled whenever context
 * tracking is enabled anywhere. We might want to decouple those later if
 * necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_is_enabled();
}
29
30static inline bool vtime_accounting_cpu_enabled(void)
31{
32 if (vtime_accounting_enabled()) {
33 if (context_tracking_cpu_is_enabled())
34 return true;
35 }
36
37 return false;
38}
39#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
/* No virtual CPU accounting configured: never enabled. */
static inline bool vtime_accounting_cpu_enabled(void)
{
	return false;
}
41#endif
42
43
44/*
45 * Common vtime APIs
46 */
47#ifdef CONFIG_VIRT_CPU_ACCOUNTING
48
49#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
50extern void vtime_task_switch(struct task_struct *prev);
51#else
52extern void vtime_common_task_switch(struct task_struct *prev);
/*
 * Context-switch hook: hand off to the common implementation, but only
 * when vtime accounting is active on this CPU.
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_cpu_enabled())
		return;

	vtime_common_task_switch(prev);
}
58#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
59
60extern void vtime_account_system(struct task_struct *tsk);
61extern void vtime_account_idle(struct task_struct *tsk);
62
63#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
64
/* CONFIG_VIRT_CPU_ACCOUNTING=n: vtime hooks compile away to no-ops. */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
67#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
68
69#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
70extern void arch_vtime_task_switch(struct task_struct *tsk);
71extern void vtime_user_enter(struct task_struct *tsk);
72extern void vtime_user_exit(struct task_struct *tsk);
73extern void vtime_guest_enter(struct task_struct *tsk);
74extern void vtime_guest_exit(struct task_struct *tsk);
75extern void vtime_init_idle(struct task_struct *tsk, int cpu);
76#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
/* CONFIG_VIRT_CPU_ACCOUNTING_GEN=n: generic vtime hooks are no-ops. */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
82#endif
83
84#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
85extern void vtime_account_irq_enter(struct task_struct *tsk);
/*
 * IRQ-exit hook under native accounting: forwards to
 * vtime_account_system() so the elapsed interrupt time is charged as
 * system time rather than to the interrupted task.
 */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_system(tsk);
}
91extern void vtime_flush(struct task_struct *tsk);
92#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=n: native IRQ vtime hooks are no-ops. */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
96#endif
97
98
99#ifdef CONFIG_IRQ_TIME_ACCOUNTING
100extern void irqtime_account_irq(struct task_struct *tsk);
101#else
/* CONFIG_IRQ_TIME_ACCOUNTING=n: irqtime hook compiles away. */
static inline void irqtime_account_irq(struct task_struct *tsk) { }
103#endif
104
/*
 * IRQ-entry accounting hook: dispatches to the vtime backend and then the
 * irqtime backend. Each call compiles to a no-op when its CONFIG option is
 * off, so this is always safe to call. The call order mirrors
 * account_irq_exit_time() below.
 */
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}
110
/*
 * IRQ-exit accounting hook: dispatches to the vtime backend and then the
 * irqtime backend. Each call compiles to a no-op when its CONFIG option is
 * off, so this is always safe to call.
 */
static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}
116
117#endif /* _LINUX_KERNEL_VTIME_H */
118