/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

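/*
 * struct perf_buffer is the kernel-side state of a perf ring buffer:
 * one struct perf_event_mmap_page (user_page) shared with userspace,
 * followed by the data pages.  nr_pages is a power of two, which lets
 * writers wrap around with a simple mask (see
 * __DEFINE_OUTPUT_COPY_BODY below).
 */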
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* when set, writes are discarded */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	unsigned int			nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

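/*
 * RCU callback wrapper around rb_free(): container_of() recovers the
 * buffer from its embedded rcu_head.  The final reference is typically
 * dropped with call_rcu(&rb->rcu_head, rb_free_rcu), deferring the
 * free until concurrent RCU readers are done with the buffer.
 */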
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

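/*
 * Note that a buffer without data pages can never be un-paused:
 * writes would have nowhere to go.
 */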
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

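/*
 * Allocation and lifetime entry points, implemented elsewhere in
 * kernel/events/: rb_alloc()/rb_free() manage the data pages,
 * rb_alloc_aux()/rb_free_aux() the optional AUX area, and
 * ring_buffer_get()/ring_buffer_put() take and drop references on a
 * buffer attached to an event.
 */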
extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

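/*
 * Size helpers.  nr_pages counts allocation units of 2^page_order()
 * pages each, so the shifts below convert it to real page counts and
 * byte sizes.  As a worked example with hypothetical values: nr_pages
 * == 2 and page_order() == 2 give data_page_nr() == 8 and
 * perf_data_size() == 8 * PAGE_SIZE.  Without CONFIG_PERF_USE_VMALLOC
 * the order is always 0 and both shifts degenerate to plain page
 * arithmetic.
 */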
static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

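/*
 * Common body of the copy routines below.  memcpy_func follows the
 * copy_from_user() convention and returns the number of bytes that
 * could NOT be copied.  The loop advances the output handle, wraps to
 * the next data page when the current one fills up (nr_pages being a
 * power of two makes the mask work), and stops early on a partial
 * copy.  The expansion returns the number of bytes left uncopied, so
 * 0 means everything was written.
 */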
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

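/*
 * DEFINE_OUTPUT_COPY() stamps out one copy routine per memcpy_func.
 * For instance, DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
 * below expands to roughly:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{ ... copy loop from __DEFINE_OUTPUT_COPY_BODY ... }
 */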
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

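/*
 * memcpy() cannot fail, so memcpy_common() always reports 0 bytes
 * uncopied.  memcpy_skip() below copies nothing but also reports 0,
 * which makes __output_skip() advance the handle by len bytes without
 * touching the buffer, e.g. to step over a part of a record that is
 * filled in separately.
 */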
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

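/*
 * Default user-memory copy: page faults are disabled so this is safe
 * to call from the IRQ and NMI contexts perf writes from, and the
 * return value is the number of bytes left uncopied, matching the
 * memcpy_func convention above.  An architecture can supply a faster
 * routine by defining arch_perf_out_copy_user itself.
 */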
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

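/*
 * Per-context recursion guard.  interrupt_context_level() maps the
 * current context (task, softirq, hardirq, NMI) to an index into
 * recursion[]; a non-zero counter means perf is already active in
 * this context and the caller must bail out.  A typical caller pairs
 * the two like:
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// already inside perf here
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 *
 * The barrier()s keep the compiler from hoisting the guarded work
 * outside the counter updates.
 */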
static inline int get_recursion_context(int *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

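/*
 * Architectures that select CONFIG_HAVE_PERF_USER_STACK_DUMP can
 * sample the user stack and expose its stack pointer via
 * user_stack_pointer(); on everything else these helpers report no
 * support and a stack pointer of 0.
 */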
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */