1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. |
4 | */ |
5 | |
6 | #include <linux/types.h> |
7 | #include <linux/bpf.h> |
8 | #include <linux/bpf_local_storage.h> |
9 | #include <uapi/linux/btf.h> |
10 | #include <linux/btf_ids.h> |
11 | |
/* Cache slot allocator shared by all cgroup local storage maps. */
DEFINE_BPF_STORAGE_CACHE(cgroup_cache);

/* Per-CPU recursion marker: non-zero while this CPU is inside the
 * cgroup-storage critical section (see bpf_cgrp_storage_lock()).
 */
static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
15 | |
/* Enter the cgroup-storage critical section on this CPU.  Migration is
 * disabled first so the per-CPU busy counter stays coherent; the counter
 * lets bpf_cgrp_storage_trylock() detect recursion on the same CPU.
 */
static void bpf_cgrp_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_cgrp_storage_busy);
}
21 | |
/* Leave the critical section opened by bpf_cgrp_storage_lock() (or a
 * successful trylock): drop the busy counter, then re-enable migration.
 */
static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
	migrate_enable();
}
27 | |
/* Try to enter the critical section.  Returns false — with the counter
 * and migrate state fully restored — when the busy counter is already
 * non-zero, i.e. this CPU is recursing into the storage code (e.g. a
 * tracing program fired inside it).  Returns true on success; the caller
 * must then call bpf_cgrp_storage_unlock().
 */
static bool bpf_cgrp_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
		this_cpu_dec(bpf_cgrp_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}
38 | |
39 | static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner) |
40 | { |
41 | struct cgroup *cg = owner; |
42 | |
43 | return &cg->bpf_cgrp_storage; |
44 | } |
45 | |
/* Destroy all local storage attached to @cgroup.  Called from the cgroup
 * release path; takes the busy lock so storage helpers recursing on this
 * CPU back off while teardown is in progress.
 */
void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage) {
		/* Nothing was ever attached — fast path out. */
		rcu_read_unlock();
		return;
	}

	bpf_cgrp_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_cgrp_storage_unlock();
	rcu_read_unlock();
}
62 | |
63 | static struct bpf_local_storage_data * |
64 | cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit) |
65 | { |
66 | struct bpf_local_storage *cgroup_storage; |
67 | struct bpf_local_storage_map *smap; |
68 | |
69 | cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage, |
70 | bpf_rcu_lock_held()); |
71 | if (!cgroup_storage) |
72 | return NULL; |
73 | |
74 | smap = (struct bpf_local_storage_map *)map; |
75 | return bpf_local_storage_lookup(local_storage: cgroup_storage, smap, cacheit_lockit); |
76 | } |
77 | |
78 | static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key) |
79 | { |
80 | struct bpf_local_storage_data *sdata; |
81 | struct cgroup *cgroup; |
82 | int fd; |
83 | |
84 | fd = *(int *)key; |
85 | cgroup = cgroup_v1v2_get_from_fd(fd); |
86 | if (IS_ERR(ptr: cgroup)) |
87 | return ERR_CAST(ptr: cgroup); |
88 | |
89 | bpf_cgrp_storage_lock(); |
90 | sdata = cgroup_storage_lookup(cgroup, map, cacheit_lockit: true); |
91 | bpf_cgrp_storage_unlock(); |
92 | cgroup_put(cgrp: cgroup); |
93 | return sdata ? sdata->data : NULL; |
94 | } |
95 | |
96 | static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key, |
97 | void *value, u64 map_flags) |
98 | { |
99 | struct bpf_local_storage_data *sdata; |
100 | struct cgroup *cgroup; |
101 | int fd; |
102 | |
103 | fd = *(int *)key; |
104 | cgroup = cgroup_v1v2_get_from_fd(fd); |
105 | if (IS_ERR(ptr: cgroup)) |
106 | return PTR_ERR(ptr: cgroup); |
107 | |
108 | bpf_cgrp_storage_lock(); |
109 | sdata = bpf_local_storage_update(owner: cgroup, smap: (struct bpf_local_storage_map *)map, |
110 | value, map_flags, GFP_ATOMIC); |
111 | bpf_cgrp_storage_unlock(); |
112 | cgroup_put(cgrp: cgroup); |
113 | return PTR_ERR_OR_ZERO(ptr: sdata); |
114 | } |
115 | |
116 | static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map) |
117 | { |
118 | struct bpf_local_storage_data *sdata; |
119 | |
120 | sdata = cgroup_storage_lookup(cgroup, map, cacheit_lockit: false); |
121 | if (!sdata) |
122 | return -ENOENT; |
123 | |
124 | bpf_selem_unlink(SELEM(sdata), reuse_now: false); |
125 | return 0; |
126 | } |
127 | |
/* Syscall-side map_delete_elem: @key holds a cgroup fd.  Returns 0 on
 * success, -ENOENT when no element exists, or a negative errno for a
 * bad fd.
 *
 * Fix: stripped editor inlay-hint annotations ("ptr:", "cgrp:") captured
 * as source text — invalid C.
 */
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_v1v2_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}
144 | |
/* Local storage maps are keyed by their owner object and cannot be
 * iterated, so get_next_key is unsupported.
 */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
149 | |
150 | static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) |
151 | { |
152 | return bpf_local_storage_map_alloc(attr, cache: &cgroup_cache, bpf_ma: true); |
153 | } |
154 | |
155 | static void cgroup_storage_map_free(struct bpf_map *map) |
156 | { |
157 | bpf_local_storage_map_free(map, cache: &cgroup_cache, NULL); |
158 | } |
159 | |
160 | /* *gfp_flags* is a hidden argument provided by the verifier */ |
161 | BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup, |
162 | void *, value, u64, flags, gfp_t, gfp_flags) |
163 | { |
164 | struct bpf_local_storage_data *sdata; |
165 | |
166 | WARN_ON_ONCE(!bpf_rcu_lock_held()); |
167 | if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE)) |
168 | return (unsigned long)NULL; |
169 | |
170 | if (!cgroup) |
171 | return (unsigned long)NULL; |
172 | |
173 | if (!bpf_cgrp_storage_trylock()) |
174 | return (unsigned long)NULL; |
175 | |
176 | sdata = cgroup_storage_lookup(cgroup, map, cacheit_lockit: true); |
177 | if (sdata) |
178 | goto unlock; |
179 | |
180 | /* only allocate new storage, when the cgroup is refcounted */ |
181 | if (!percpu_ref_is_dying(ref: &cgroup->self.refcnt) && |
182 | (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) |
183 | sdata = bpf_local_storage_update(owner: cgroup, smap: (struct bpf_local_storage_map *)map, |
184 | value, map_flags: BPF_NOEXIST, gfp_flags); |
185 | |
186 | unlock: |
187 | bpf_cgrp_storage_unlock(); |
188 | return IS_ERR_OR_NULL(ptr: sdata) ? (unsigned long)NULL : (unsigned long)sdata->data; |
189 | } |
190 | |
/* Helper bpf_cgrp_storage_delete(): remove @map's element from @cgroup.
 * Returns 0 on success, -EINVAL for a NULL cgroup, -EBUSY when the
 * storage code is being recursed into on this CPU, or -ENOENT when no
 * element exists.
 */
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}
206 | |
/* map_ops for BPF_MAP_TYPE_CGRP_STORAGE; mostly thin wrappers around the
 * generic bpf_local_storage implementation.
 */
const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};
221 | |
/* Verifier signature for bpf_cgrp_storage_get(): takes a map pointer and
 * a (possibly NULL) trusted cgroup pointer; returns a map value pointer
 * or NULL.  arg3 is the optional init value used with F_CREATE.
 */
const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
232 | |
/* Verifier signature for bpf_cgrp_storage_delete(): map pointer plus a
 * (possibly NULL) cgroup pointer; returns an integer error code.
 */
const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};
241 | |