/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		context_tracking_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_USER);
}

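/*
 * Usage sketch (illustrative only; the function names below are
 * hypothetical placeholders, not real kernel symbols): an architecture's
 * low-level entry/exit code would typically pair these calls while
 * interrupts are still disabled, marking the transition out of and back
 * into user mode:
 *
 *	static void example_arch_enter_from_user(void)
 *	{
 *		user_exit_irqoff();		<- leaving user mode, IRQs off
 *		... run the syscall or handle the interrupt ...
 *	}
 *
 *	static void example_arch_return_to_user(void)
 *	{
 *		... finish kernel work, IRQs disabled again ...
 *		user_enter_irqoff();		<- about to resume user mode
 *	}
 *
 * user_enter()/user_exit() are the variants for callers that cannot
 * guarantee interrupts are disabled around the call.
 */
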
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}

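/*
 * Usage sketch (illustrative only; the handler name is a hypothetical
 * placeholder, not a real kernel symbol): exception_enter() and
 * exception_exit() bracket an exception handler that may have interrupted
 * either user or kernel mode, saving and then restoring whichever context
 * was previously being tracked:
 *
 *	static void example_fault_handler(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		... handle the fault in kernel context ...
 *
 *		exception_exit(prev_state);
 *	}
 */
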

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current cpu's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static __always_inline enum ctx_state ct_state(void)
{
	return context_tracking_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
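/*
 * Usage sketch (illustrative only): CT_WARN_ON() is meant for debug
 * assertions built on ct_state(), e.g. to check that a code path only
 * runs in kernel context.  The condition is only evaluated against the
 * tracked state when context tracking is enabled:
 *
 *	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 */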

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	instrumentation_begin();
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
	instrumentation_end();

	if (context_tracking_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_enabled_this_cpu()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__context_tracking_exit(CONTEXT_GUEST);
}

static __always_inline void vtime_account_guest_exit(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}

static __always_inline void guest_exit_irqoff(void)
{
	context_tracking_guest_exit();

	instrumentation_begin();
	vtime_account_guest_exit();
	instrumentation_end();
}

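/*
 * Usage sketch (illustrative only; the function name is a hypothetical
 * placeholder, not a real KVM symbol): a hypervisor's vcpu run loop pairs
 * these helpers around the actual guest entry, with interrupts disabled
 * across both calls:
 *
 *	static void example_vcpu_run_once(void)
 *	{
 *		local_irq_disable();
 *		guest_enter_irqoff();
 *
 *		... enter the guest and run until the next VM exit ...
 *
 *		guest_exit_irqoff();
 *		local_irq_enable();
 *	}
 */
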
#else
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context, so it's safe to assume
	 * that it's the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
	instrumentation_end();
}

static __always_inline void context_tracking_guest_exit(void) { }

static __always_inline void vtime_account_guest_exit(void)
{
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}

static __always_inline void guest_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}
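/*
 * Usage sketch (illustrative only): guest_exit() is for callers that do
 * not already run with interrupts disabled; it is equivalent to wrapping
 * guest_exit_irqoff() yourself:
 *
 *	local_irq_save(flags);
 *	guest_exit_irqoff();
 *	local_irq_restore(flags);
 */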

#endif /* _LINUX_CONTEXT_TRACKING_H */