#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	512
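
/*
 * Embedded in each cacheable object; links the object into the cache's
 * singly linked free stack.
 */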
struct io_cache_entry {
	struct io_wq_work_node node;
};
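
/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 * a cached type embeds io_cache_entry and converts back with
 * container_of(). The names my_obj, my_cache and cache_entry are
 * illustrative only.
 *
 *	struct my_obj {
 *		struct io_cache_entry cache_entry;
 *	};
 *
 *	io_alloc_cache_init(&my_cache, IO_ALLOC_CACHE_MAX,
 *			    sizeof(struct my_obj));
 *
 *	entry = io_alloc_cache_get(&my_cache);
 *	obj = entry ? container_of(entry, struct my_obj, cache_entry)
 *		    : kmalloc(sizeof(*obj), GFP_KERNEL);
 *	...
 *	if (!io_alloc_cache_put(&my_cache, &obj->cache_entry))
 *		kfree(obj);
 */

/*
 * Stash @entry for reuse if the cache has room. On success the object
 * is poisoned so KASAN can flag any use while it sits in the cache;
 * returns false when the cache is full, in which case the caller must
 * free @entry itself.
 */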
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		cache->nr_cached++;
		wq_stack_add_head(&entry->node, &cache->list);
		kasan_mempool_poison_object(entry);
		return true;
	}
	return false;
}
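
/* The cache is empty when the free stack's head pointer is NULL. */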
static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
{
	return !cache->list.next;
}
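
/*
 * Pop the most recently cached entry, or return NULL if the cache is
 * empty. The object is unpoisoned before being handed back.
 */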
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->list.next) {
		struct io_cache_entry *entry;

		entry = container_of(cache->list.next, struct io_cache_entry, node);
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		cache->list.next = cache->list.next->next;
		cache->nr_cached--;
		return entry;
	}

	return NULL;
}
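
/*
 * Initialise an empty cache that holds at most @max_nr objects of
 * @size bytes each.
 */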
static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->list.next = NULL;
	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
}
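
/*
 * Drain the cache, handing each remaining entry to @free, then reset
 * the cached count.
 */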
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(struct io_cache_entry *))
{
	while (1) {
		struct io_cache_entry *entry = io_alloc_cache_get(cache);

		if (!entry)
			break;
		free(entry);
	}
	cache->nr_cached = 0;
}
#endif