// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
/* <stdint.h> may already define SIZE_MAX; only provide a fallback. */
#ifndef SIZE_MAX
#define SIZE_MAX (~(size_t)0)
#endif
#define KMALLOC_MAX_SIZE SIZE_MAX

typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(SMP_CACHE_BYTES, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}
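
/*
 * Editorial note: allocations are cache-line aligned so that producer- and
 * consumer-side fields never false-share a line; cache-line bouncing is
 * exactly what this benchmark is sensitive to.
 */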

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}
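
/*
 * Editorial note on the overflow check above: on a 64-bit system,
 * n = SIZE_MAX / 4 + 1 with size = 4 makes n * size wrap around to 0, so a
 * naive kmalloc(n * size, ...) would under-allocate; returning NULL instead
 * mirrors the kernel's kmalloc_array() contract.
 */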

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree
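
/*
 * Hypothetical usage sketch (not part of the original harness) showing how
 * the allocator shims compose.  The PTR_RING_SHIM_DEMO guard is an
 * assumption made up for this sketch, so it never affects the real build.
 */
#ifdef PTR_RING_SHIM_DEMO
static void alloc_shim_demo(void)
{
	/* A zeroed array of 16 pointer slots, as the ring code allocates it. */
	void **queue = kcalloc(16, sizeof(void *), 0);

	assert(queue && !queue[0]);
	kvfree(queue);
}
#endif
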
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

/*
 * Userspace has no softirqs or interrupts to mask, so the _bh, _irq and
 * _irqsave flavors all collapse into the plain spinlock.
 */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
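/*
 * Hypothetical sketch (same made-up PTR_RING_SHIM_DEMO guard as above):
 * every flavor funnels into the same pthread spinlock, so each of the
 * lock/unlock pairs below is interchangeable at runtime.
 */
#ifdef PTR_RING_SHIM_DEMO
static void spinlock_shim_demo(void)
{
	spinlock_t lock;
	unsigned long flags = 0;

	spin_lock_init(&lock);
	spin_lock_bh(&lock);
	spin_unlock_bh(&lock);
	spin_lock_irqsave(&lock, flags);
	spin_unlock_irqrestore(&lock, flags);
}
#endif
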
#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}
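
/*
 * Editorial note: ptr_ring's ->batch is the number of consumed slots the
 * consumer invalidates at once before handing them back to the producer,
 * trading completion latency for fewer cache-line bounces; overriding it
 * here lets the benchmark sweep that trade-off.
 */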

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that a subsequent
 * add_inbuf will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty(void)
{
	return tailcnt == headcnt || __ptr_ring_full(&array);
}

/* Notification callbacks are not exercised by this test: they must never run. */
void disable_call(void)
{
	assert(0);
}

bool enable_call(void)
{
	assert(0);
	return false; /* not reached */
}

void kick_available(void)
{
	assert(0);
}
/* host side */
void disable_kick(void)
{
	assert(0);
}

bool enable_kick(void)
{
	assert(0);
	return false; /* not reached */
}

bool avail_empty(void)
{
	return __ptr_ring_empty(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}
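
/*
 * Hypothetical single-threaded smoke test of the shim above (same made-up
 * PTR_RING_SHIM_DEMO guard); it assumes the harness has already set
 * ring_size and called alloc_ring().
 */
#ifdef PTR_RING_SHIM_DEMO
static void ptr_ring_shim_demo(void)
{
	unsigned len;
	void *buf;

	assert(add_inbuf(1, "hello", NULL) == 0); /* guest produces an entry */
	assert(use_buf(&len, &buf));              /* host consumes it */
	assert(get_buf(&len, &buf));              /* guest sees the completion */
	assert(avail_empty());                    /* nothing left on the ring */
}
#endif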

void call_used(void)
{
	assert(0);
}