// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

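/*
 * Counters shared with the test harness: nr_allocated tracks live
 * objects across all caches, preempt_count emulates the kernel's
 * preemption nesting, and the verbosity flags gate allocation tracing.
 */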
int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

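/*
 * A minimal userspace stand-in for the kernel's kmem_cache: a pthread
 * mutex replaces the real slab locking, and freed objects are kept on
 * a small freelist threaded through the radix_tree_node parent pointer
 * so tests can exercise object reuse.
 */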
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;		/* objects currently on the freelist */
	void *objs;		/* head of the freelist */
	void (*ctor)(void *);
	unsigned int non_kernel;	/* budget for non-sleeping allocations */
	unsigned long nr_allocated;	/* live objects from this cache */
	unsigned long nr_tallocated;	/* total allocations since last reset */
};

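/*
 * non_kernel is a budget for allocations that cannot sleep: each
 * request without __GFP_DIRECT_RECLAIM consumes one credit and fails
 * once the budget is exhausted, letting tests inject atomic-allocation
 * failures deterministically.
 */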
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

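/* Bytes currently outstanding: object size times live object count. */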
unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

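/*
 * The lru argument is accepted for API compatibility but ignored.
 * Objects come from the per-cache freelist when available, otherwise
 * from posix_memalign()/malloc().
 */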
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

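/*
 * Caller must hold cachep->lock.  Unaligned objects are cached for
 * reuse until the freelist holds 11 entries (nr_objs > 10 triggers a
 * real free); aligned objects are always poisoned and released.
 */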
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

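/* As above, but also drops the live-allocation counters. */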
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

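/* Bulk free takes the lock once for the whole batch. */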
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

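/* Stub: the userspace emulation has nothing to shrink. */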
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

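/*
 * All-or-nothing bulk allocation: on any mid-batch failure (including
 * an exhausted non_kernel budget) everything allocated so far is
 * handed back to the cache and 0 is returned; on success returns size.
 */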
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

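/*
 * name and flags are accepted for API compatibility but not stored;
 * the returned cache starts empty with its non-kernel budget at zero.
 */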
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk
 * counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Testing the bulk allocators without an aligned kmem_cache to force
	 * the bulk alloc/free paths to reuse freed objects.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}