/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({ \
	long ______r; \
	static struct ftrace_likely_data \
		__aligned(4) \
		__section("_ftrace_annotated_branch") \
		______f = { \
			.data.func = __func__, \
			.data.file = __FILE__, \
			.data.line = __LINE__, \
		}; \
	______r = __builtin_expect(!!(x), expect); \
	ftrace_likely_update(&______f, ______r, \
			     expect, is_constant); \
	______r; \
})

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea was taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
	({ \
		int ______r; \
		static struct ftrace_branch_data \
			__aligned(4) \
			__section("_ftrace_branch") \
			______f = { \
				.func = __func__, \
				.file = __FILE__, \
				.line = __LINE__, \
			}; \
		______r = !!(cond); \
		______f.miss_hit[______r]++; \
		______r; \
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
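
/*
 * Example (illustrative sketch only): annotate a branch that is almost never
 * taken, such as an error path, so the compiler can lay out the expected
 * path first:
 *
 *	if (unlikely(err))
 *		return err;
 *
 *	if (likely(ptr))
 *		do_fast_path(ptr);
 *
 * 'err', 'ptr' and do_fast_path() are hypothetical names used only for
 * illustration.
 */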

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif
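
/*
 * Example (illustrative sketch only): barrier() constrains the compiler, not
 * the CPU. It stops the compiler from caching a value in a register across
 * the loop body while busy-waiting:
 *
 *	while (!flag)
 *		barrier();	force the compiler to reload 'flag' each pass
 *
 * 'flag' is a hypothetical variable used only for illustration.
 */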

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({ \
	asm volatile("%c0:\n\t" \
		     ".pushsection .discard.reachable\n\t" \
		     ".long %c0b - .\n\t" \
		     ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
	asm volatile("%c0:\n\t" \
		     ".pushsection .discard.unreachable\n\t" \
		     ".long %c0b - .\n\t" \
		     ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
	"999:\n\t" \
	".pushsection .discard.unreachable\n\t" \
	".long 999b - .\n\t" \
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { \
	annotate_unreachable(); \
	__builtin_unreachable(); \
} while (0)
#endif
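
/*
 * Example (illustrative sketch only): tell the compiler that control can
 * never reach past a given point, e.g. after a switch statement known to
 * cover every possible value:
 *
 *	switch (state) {
 *	case STATE_A:
 *		return handle_a();
 *	case STATE_B:
 *		return handle_b();
 *	}
 *	unreachable();
 *
 * 'state', the STATE_* constants and the handle_*() helpers are all
 * hypothetical names used only for illustration.
 */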

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym) \
	extern typeof(sym) sym; \
	static const unsigned long __kentry_##sym \
	__used \
	__section("___kentry" "+" #sym) \
	= (unsigned long)&sym;
#endif
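
/*
 * Example (illustrative sketch only): keep an entry point alive even though
 * nothing in C references it by name, e.g. a handler that is only ever
 * entered via a vector table set up in assembly:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 *
 * 'my_vector_entry' is a hypothetical symbol used only for illustration.
 */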

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
	({ unsigned long __ptr; \
	   __ptr = (unsigned long) (ptr); \
	   (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
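
/*
 * Example (illustrative sketch only): make the compiler treat 'mask' as an
 * unknown value so it cannot constant-fold or specialize code paths based on
 * it, e.g. in timing-sensitive selection:
 *
 *	unsigned long mask = compute_mask(key);
 *	OPTIMIZER_HIDE_VAR(mask);
 *	result = (value & mask) | (other & ~mask);
 *
 * compute_mask(), 'key', 'value', 'other' and 'result' are hypothetical
 * names used only for illustration.
 */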

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE \
({ \
	switch (size) { \
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
	default: \
		barrier(); \
		__builtin_memcpy((void *)res, (const void *)p, size); \
		barrier(); \
	} \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining; attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy of the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check) \
({ \
	union { typeof(x) __val; char __c[1]; } __u; \
	if (check) \
		__read_once_size(&(x), __u.__c, sizeof(x)); \
	else \
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = \
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})
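
/*
 * Example (illustrative sketch only): a flag shared between process context
 * and an interrupt handler on the same CPU, accessed without locks.
 * READ_ONCE()/WRITE_ONCE() stop the compiler from tearing, fusing or caching
 * the accesses; any CPU-level ordering beyond that still needs explicit
 * barriers or atomics.
 *
 *	writer (e.g. irq handler):
 *		WRITE_ONCE(shared->ready, 1);
 *
 *	reader (e.g. process context):
 *		while (!READ_ONCE(shared->ready))
 *			cpu_relax();
 *
 * 'shared' and its 'ready' member are hypothetical names used only for
 * illustration.
 */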

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
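
/*
 * Example (illustrative sketch only): recover an absolute pointer from a
 * self-relative 32-bit offset, such as one emitted with ".long sym - ." in
 * assembly or stored in a relative-reference table:
 *
 *	extern const int my_rel_entry;
 *	void *target = offset_to_ptr(&my_rel_entry);
 *
 * 'my_rel_entry' and 'target' are hypothetical names used only for
 * illustration.
 */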

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
	do { \
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition)) \
			prefix ## suffix(); \
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t) \
	compiletime_assert(__native_word(t), \
		"Need native word sized stores/loads for atomicity.")
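
/*
 * Example (illustrative sketch only): fail at build time, rather than at run
 * time, if a structure grows beyond a single 64-byte cache line:
 *
 *	compiletime_assert(sizeof(struct my_entry) <= 64,
 *			   "struct my_entry must fit in one cache line");
 *
 * 'struct my_entry' is a hypothetical type used only for illustration; the
 * condition must be a compile-time constant.
 */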

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

#endif /* __LINUX_COMPILER_H */