1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2021 Facebook */ |
3 | |
4 | #include <linux/bitmap.h> |
5 | #include <linux/bpf.h> |
6 | #include <linux/btf.h> |
7 | #include <linux/err.h> |
8 | #include <linux/jhash.h> |
9 | #include <linux/random.h> |
10 | #include <linux/btf_ids.h> |
11 | |
/* Creation-time flags accepted in attr->map_flags for bloom filter maps;
 * any other bit set at map creation is rejected with -EINVAL (see
 * bloom_map_alloc()).
 */
#define BLOOM_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
14 | |
/* A bloom filter map: a probabilistic set supporting push (insert) and
 * peek (membership test). Peek may report false positives but never false
 * negatives. The bit array is allocated inline after the struct.
 */
struct bpf_bloom_filter {
	struct bpf_map map;	/* embedded map; container_of() recovers the filter */
	u32 bitset_mask;	/* bit-array size minus one; size is a power of two */
	u32 hash_seed;		/* seed for all hash functions; 0 with BPF_F_ZERO_SEED */
	u32 nr_hash_funcs;	/* number of bits probed/set per element */
	unsigned long bitset[];	/* flexible-array bit set, sized at alloc time */
};
22 | |
23 | static u32 hash(struct bpf_bloom_filter *bloom, void *value, |
24 | u32 value_size, u32 index) |
25 | { |
26 | u32 h; |
27 | |
28 | if (likely(value_size % 4 == 0)) |
29 | h = jhash2(k: value, length: value_size / 4, initval: bloom->hash_seed + index); |
30 | else |
31 | h = jhash(key: value, length: value_size, initval: bloom->hash_seed + index); |
32 | |
33 | return h & bloom->bitset_mask; |
34 | } |
35 | |
36 | static long bloom_map_peek_elem(struct bpf_map *map, void *value) |
37 | { |
38 | struct bpf_bloom_filter *bloom = |
39 | container_of(map, struct bpf_bloom_filter, map); |
40 | u32 i, h; |
41 | |
42 | for (i = 0; i < bloom->nr_hash_funcs; i++) { |
43 | h = hash(bloom, value, value_size: map->value_size, index: i); |
44 | if (!test_bit(h, bloom->bitset)) |
45 | return -ENOENT; |
46 | } |
47 | |
48 | return 0; |
49 | } |
50 | |
51 | static long bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags) |
52 | { |
53 | struct bpf_bloom_filter *bloom = |
54 | container_of(map, struct bpf_bloom_filter, map); |
55 | u32 i, h; |
56 | |
57 | if (flags != BPF_ANY) |
58 | return -EINVAL; |
59 | |
60 | for (i = 0; i < bloom->nr_hash_funcs; i++) { |
61 | h = hash(bloom, value, value_size: map->value_size, index: i); |
62 | set_bit(nr: h, addr: bloom->bitset); |
63 | } |
64 | |
65 | return 0; |
66 | } |
67 | |
/* Elements cannot be removed from a bloom filter (clearing bits could
 * affect other elements), so pop is unsupported.
 */
static long bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
72 | |
/* Deletion is unsupported for the same reason as pop: bits may be shared
 * between elements, so none can be safely cleared.
 */
static long bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}
77 | |
/* Bloom filter maps are keyless and store no retrievable elements, so
 * key iteration is unsupported.
 */
static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EOPNOTSUPP;
}
82 | |
83 | /* Called from syscall */ |
84 | static int bloom_map_alloc_check(union bpf_attr *attr) |
85 | { |
86 | if (attr->value_size > KMALLOC_MAX_SIZE) |
87 | /* if value_size is bigger, the user space won't be able to |
88 | * access the elements. |
89 | */ |
90 | return -E2BIG; |
91 | |
92 | return 0; |
93 | } |
94 | |
95 | static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) |
96 | { |
97 | u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits; |
98 | int numa_node = bpf_map_attr_numa_node(attr); |
99 | struct bpf_bloom_filter *bloom; |
100 | |
101 | if (attr->key_size != 0 || attr->value_size == 0 || |
102 | attr->max_entries == 0 || |
103 | attr->map_flags & ~BLOOM_CREATE_FLAG_MASK || |
104 | !bpf_map_flags_access_ok(access_flags: attr->map_flags) || |
105 | /* The lower 4 bits of map_extra (0xF) specify the number |
106 | * of hash functions |
107 | */ |
108 | (attr->map_extra & ~0xF)) |
109 | return ERR_PTR(error: -EINVAL); |
110 | |
111 | nr_hash_funcs = attr->map_extra; |
112 | if (nr_hash_funcs == 0) |
113 | /* Default to using 5 hash functions if unspecified */ |
114 | nr_hash_funcs = 5; |
115 | |
116 | /* For the bloom filter, the optimal bit array size that minimizes the |
117 | * false positive probability is n * k / ln(2) where n is the number of |
118 | * expected entries in the bloom filter and k is the number of hash |
119 | * functions. We use 7 / 5 to approximate 1 / ln(2). |
120 | * |
121 | * We round this up to the nearest power of two to enable more efficient |
122 | * hashing using bitmasks. The bitmask will be the bit array size - 1. |
123 | * |
124 | * If this overflows a u32, the bit array size will have 2^32 (4 |
125 | * GB) bits. |
126 | */ |
127 | if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) || |
128 | check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) || |
129 | nr_bits > (1UL << 31)) { |
130 | /* The bit array size is 2^32 bits but to avoid overflowing the |
131 | * u32, we use U32_MAX, which will round up to the equivalent |
132 | * number of bytes |
133 | */ |
134 | bitset_bytes = BITS_TO_BYTES(U32_MAX); |
135 | bitset_mask = U32_MAX; |
136 | } else { |
137 | if (nr_bits <= BITS_PER_LONG) |
138 | nr_bits = BITS_PER_LONG; |
139 | else |
140 | nr_bits = roundup_pow_of_two(nr_bits); |
141 | bitset_bytes = BITS_TO_BYTES(nr_bits); |
142 | bitset_mask = nr_bits - 1; |
143 | } |
144 | |
145 | bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long)); |
146 | bloom = bpf_map_area_alloc(size: sizeof(*bloom) + bitset_bytes, numa_node); |
147 | |
148 | if (!bloom) |
149 | return ERR_PTR(error: -ENOMEM); |
150 | |
151 | bpf_map_init_from_attr(map: &bloom->map, attr); |
152 | |
153 | bloom->nr_hash_funcs = nr_hash_funcs; |
154 | bloom->bitset_mask = bitset_mask; |
155 | |
156 | if (!(attr->map_flags & BPF_F_ZERO_SEED)) |
157 | bloom->hash_seed = get_random_u32(); |
158 | |
159 | return &bloom->map; |
160 | } |
161 | |
162 | static void bloom_map_free(struct bpf_map *map) |
163 | { |
164 | struct bpf_bloom_filter *bloom = |
165 | container_of(map, struct bpf_bloom_filter, map); |
166 | |
167 | bpf_map_area_free(base: bloom); |
168 | } |
169 | |
170 | static void *bloom_map_lookup_elem(struct bpf_map *map, void *key) |
171 | { |
172 | /* The eBPF program should use map_peek_elem instead */ |
173 | return ERR_PTR(error: -EINVAL); |
174 | } |
175 | |
/* Keyed update is not meaningful for a bloom filter. */
static long bloom_map_update_elem(struct bpf_map *map, void *key,
				  void *value, u64 flags)
{
	/* The eBPF program should use map_push_elem instead */
	return -EINVAL;
}
182 | |
183 | static int bloom_map_check_btf(const struct bpf_map *map, |
184 | const struct btf *btf, |
185 | const struct btf_type *key_type, |
186 | const struct btf_type *value_type) |
187 | { |
188 | /* Bloom filter maps are keyless */ |
189 | return btf_type_is_void(t: key_type) ? 0 : -EINVAL; |
190 | } |
191 | |
192 | static u64 bloom_map_mem_usage(const struct bpf_map *map) |
193 | { |
194 | struct bpf_bloom_filter *bloom; |
195 | u64 bitset_bytes; |
196 | |
197 | bloom = container_of(map, struct bpf_bloom_filter, map); |
198 | bitset_bytes = BITS_TO_BYTES((u64)bloom->bitset_mask + 1); |
199 | bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long)); |
200 | return sizeof(*bloom) + bitset_bytes; |
201 | } |
202 | |
/* BTF type id for struct bpf_bloom_filter, exposed via map_btf_id. */
BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
/* Ops table for BPF_MAP_TYPE_BLOOM_FILTER. Only push (insert) and peek
 * (membership test) do real work; the keyed/removal ops all reject.
 */
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bloom_map_alloc_check,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_get_next_key = bloom_map_get_next_key,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_mem_usage = bloom_map_mem_usage,
	.map_btf_id = &bpf_bloom_map_btf_ids[0],
};
220 | |