1/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
2/******************************************************************************
3 *
4 * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
5 *
6 * Copyright (C) 2000 - 2023, Intel Corp.
7 *
8 *****************************************************************************/
9
10#ifndef __ACLINUXEX_H__
11#define __ACLINUXEX_H__
12
13#ifdef __KERNEL__
14
#ifndef ACPI_USE_NATIVE_DIVIDE

#ifndef ACPI_DIV_64_BY_32
/*
 * Divide a 64-bit value (given as high/low 32-bit halves) by a 32-bit
 * divisor, yielding a 32-bit quotient and remainder.  Reassembles the
 * dividend, then uses the kernel's do_div(), which divides in place
 * and returns the remainder.
 */
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	do { \
		u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
		(r32) = do_div ((__n), (d32)); \
		(q32) = (u32) (__n); \
	} while (0)
#endif

#ifndef ACPI_SHIFT_RIGHT_64
/*
 * Shift a 64-bit value, held as high/low 32-bit halves, right by one
 * bit, carrying the low bit of n_hi into the top bit of n_lo.
 */
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	do { \
		(n_lo) >>= 1; \
		(n_lo) |= (((n_hi) & 1) << 31); \
		(n_hi) >>= 1; \
	} while (0)
#endif

#endif
36
/*
 * Overrides for in-kernel ACPICA
 */

/*
 * NOTE(review): ACPI_INIT_FUNCTION presumably tags this for the init
 * section so it is discarded after boot -- confirm in the ACPICA headers.
 */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);

/* Tear-down counterpart of acpi_os_initialize(). */
acpi_status acpi_os_terminate(void);
43
44/*
45 * The irqs_disabled() check is for resume from RAM.
46 * Interrupts are off during resume, just like they are for boot.
47 * However, boot has (system_state != SYSTEM_RUNNING)
48 * to quiet __might_sleep() in kmalloc() and resume does not.
49 */
50static inline void *acpi_os_allocate(acpi_size size)
51{
52 return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
53}
54
55static inline void *acpi_os_allocate_zeroed(acpi_size size)
56{
57 return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
58}
59
/*
 * Release memory obtained from acpi_os_allocate()/acpi_os_allocate_zeroed().
 * kfree(NULL) is a no-op, so callers may pass NULL safely.
 */
static inline void acpi_os_free(void *memory)
{
	/*
	 * Fix: the source contained "kfree(objp: memory);" -- a leaked
	 * IDE parameter-name hint that is not valid C.
	 */
	kfree(memory);
}
64
65static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
66{
67 return kmem_cache_zalloc(k: cache,
68 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
69}
70
71static inline acpi_thread_id acpi_os_get_thread_id(void)
72{
73 return (acpi_thread_id) (unsigned long)current;
74}
75
76/*
 * When lockdep is enabled, the spin_lock_init() macro stringifies its
78 * argument and uses that as a name for the lock in debugging.
79 * By executing spin_lock_init() in a macro the key changes from "lock" for
80 * all locks to the name of the argument of acpi_os_create_lock(), which
81 * prevents lockdep from reporting false positives for ACPICA locks.
82 */
/*
 * Allocate and initialize a spinlock.  Stores the new lock through
 * __handle and evaluates to AE_OK on success or AE_NO_MEMORY if the
 * allocation fails.  Deliberately a macro, not a function, so lockdep
 * derives the lock-class name from the caller's argument.
 */
#define acpi_os_create_lock(__handle) \
	({ \
		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})
92
93
/*
 * Raw-spinlock variant of acpi_os_create_lock(); same allocation and
 * lockdep-naming rationale, evaluating to AE_OK or AE_NO_MEMORY.
 */
#define acpi_os_create_raw_lock(__handle) \
	({ \
		raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			raw_spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})
103
/*
 * Acquire a raw spinlock with local interrupts disabled.  Returns the
 * saved interrupt flags, which the caller must pass back to
 * acpi_os_release_raw_lock().
 */
static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
{
	acpi_cpu_flags flags;

	raw_spin_lock_irqsave(lockp, flags);
	return flags;
}
111
/*
 * Release a raw spinlock taken with acpi_os_acquire_raw_lock(),
 * restoring the interrupt flags saved at acquire time.
 */
static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
					    acpi_cpu_flags flags)
{
	raw_spin_unlock_irqrestore(lockp, flags);
}
117
/* Free a lock allocated by acpi_os_create_raw_lock(). */
static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
{
	ACPI_FREE(handle);
}
122
/*
 * Memory-readability probe.  The kernel port trusts its callers and
 * unconditionally reports the region as readable.
 */
static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
	return TRUE;
}
127
/* In-kernel debugger stub: no setup required, always succeeds. */
static inline acpi_status acpi_os_initialize_debugger(void)
{
	return AE_OK;
}
132
/* In-kernel debugger stub: nothing to tear down. */
static inline void acpi_os_terminate_debugger(void)
{
}
137
138/*
139 * OSL interfaces added by Linux
140 */
141
142#endif /* __KERNEL__ */
143
144#endif /* __ACLINUXEX_H__ */
145

/* source: linux/include/acpi/platform/aclinuxex.h */