// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */

#include <linux/bpf.h>
#include <linux/bpf_mprog.h>

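/* Resolve a BPF link from an id (BPF_F_ID set) or from an fd, and check
 * that its underlying program is of the expected type. On success, the
 * tuple holds a reference on the link.
 */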
static int bpf_mprog_link(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_link *link = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		link = bpf_link_by_id(id_or_fd);
	else if (id_or_fd)
		link = bpf_link_get_from_fd(id_or_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);
	if (type && link->prog->type != type) {
		bpf_link_put(link);
		return -EINVAL;
	}

	tuple->link = link;
	tuple->prog = link->prog;
	return 0;
}

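/* Same as bpf_mprog_link(), but resolve a BPF program from an id or fd.
 * On success, the tuple holds a reference on the program; tuple->link
 * stays NULL.
 */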
static int bpf_mprog_prog(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_prog *prog = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		prog = bpf_prog_by_id(id_or_fd);
	else if (id_or_fd)
		prog = bpf_prog_get(id_or_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	if (type && prog->type != type) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	tuple->link = NULL;
	tuple->prog = prog;
	return 0;
}

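/* Resolve the relative program or link that id_or_fd refers to for the
 * BPF_F_BEFORE/BPF_F_AFTER position rules. With BPF_F_LINK set, the
 * target is a link, otherwise a program.
 */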
static int bpf_mprog_tuple_relative(struct bpf_tuple *tuple,
				    u32 id_or_fd, u32 flags,
				    enum bpf_prog_type type)
{
	bool link = flags & BPF_F_LINK;
	bool id = flags & BPF_F_ID;

	memset(tuple, 0, sizeof(*tuple));
	if (link)
		return bpf_mprog_link(tuple, id_or_fd, flags, type);
	/* If no relevant flag is set and no id_or_fd was passed, then
	 * the tuple link/prog is just NULLed. This is the case when
	 * before/after selects the first/last position without passing
	 * an fd.
	 */
	if (!id && !id_or_fd)
		return 0;
	return bpf_mprog_prog(tuple, id_or_fd, flags, type);
}

static void bpf_mprog_tuple_put(struct bpf_tuple *tuple)
{
	if (tuple->link)
		bpf_link_put(tuple->link);
	else if (tuple->prog)
		bpf_prog_put(tuple->prog);
}

/* The bpf_mprog_{replace,delete}() operate on an exact idx position, with
 * the one exception that for deletion we support deleting from front/back.
 * In case of front, idx is -1; in case of back, idx is bpf_mprog_total(entry).
 * Adjustment to the first and last entry is trivial. For bpf_mprog_insert()
 * we have to deal with the following cases:
 *
 * idx + before:
 *
 * Insert P4 before P3: idx for the old array is 1, idx for the new array
 * is 2, hence we adjust the target idx for the new array, so that memmove
 * copies P1 and P2 to the new entry, and we insert P4 into idx 2. Inserting
 * before P1 would have old idx -1 and new idx 0.
 *
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 *
 * idx + after:
 *
 * Insert P4 after P2: idx for the old array is 2, idx for the new array
 * is 2. Again, memmove copies P1 and P2 to the new entry, and we insert
 * P4 into idx 2. Inserting after P3 would have both old/new idx at 3 aka
 * bpf_mprog_total(entry).
 *
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 */
static int bpf_mprog_replace(struct bpf_mprog_entry *entry,
			     struct bpf_mprog_entry **entry_new,
			     struct bpf_tuple *ntuple, int idx)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *oprog;

	bpf_mprog_read(entry, idx, &fp, &cp);
	oprog = READ_ONCE(fp->prog);
	bpf_mprog_write(fp, cp, ntuple);
	if (!ntuple->link) {
		WARN_ON_ONCE(cp->link);
		bpf_prog_put(oprog);
	}
	*entry_new = entry;
	return 0;
}

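/* Insert ntuple into the inactive peer entry: copy over the active array,
 * grow it at the flag-adjusted target position and write the new
 * program/link there. The peer is handed back via entry_new for the
 * caller to publish.
 */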
static int bpf_mprog_insert(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *ntuple, int idx, u32 flags)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == total)
		goto insert;
	else if (flags & BPF_F_BEFORE)
		idx += 1;
	bpf_mprog_entry_grow(peer, idx);
insert:
	bpf_mprog_read(peer, idx, &fp, &cp);
	bpf_mprog_write(fp, cp, ntuple);
	bpf_mprog_inc(peer);
	*entry_new = peer;
	return 0;
}

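/* Delete the element at idx from the inactive peer entry, with idx -1
 * and bpf_mprog_total(entry) normalized to the first and last element.
 * The removed tuple is marked for release, so that its reference can be
 * dropped once the caller commits the peer entry.
 */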
static int bpf_mprog_delete(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *dtuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_entry_shrink(peer, idx);
	bpf_mprog_dec(peer);
	bpf_mprog_mark_for_release(peer, dtuple);
	*entry_new = peer;
	return 0;
}

/* In bpf_mprog_pos_*() we evaluate the target position for the BPF
 * program/link that needs to be replaced, inserted or deleted for
 * each "rule" independently. If all rules agree on that position
 * or existing element, then enact replacement, addition or deletion.
 * If this is not the case, then the request cannot be satisfied and
 * we bail out with an error.
 */
static int bpf_mprog_pos_exact(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog))
			return tuple->link == cp->link ? i : -EBUSY;
	}
	return -ENOENT;
}

static int bpf_mprog_pos_before(struct bpf_mprog_entry *entry,
				struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			return i - 1;
	}
	return tuple->prog ? -ENOENT : -1;
}

static int bpf_mprog_pos_after(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			return i + 1;
	}
	return tuple->prog ? -ENOENT : bpf_mprog_total(entry);
}

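/* Attach a new program/link to the entry: append it, place it relative
 * to an existing element (BPF_F_BEFORE/BPF_F_AFTER with the target given
 * by id or fd), or replace an existing program (BPF_F_REPLACE). A
 * non-zero revision must match the entry's current revision, otherwise
 * the request is rejected with -ESTALE.
 */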
int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, ntuple = {
		.prog = prog_new,
		.link = link,
	}, otuple = {
		.prog = prog_old,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (bpf_mprog_exists(entry, prog_new))
		return -EEXIST;
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd,
				       flags & ~BPF_F_REPLACE,
				       prog_new->type);
	if (ret)
		return ret;
	if (flags & BPF_F_REPLACE) {
		tidx = bpf_mprog_pos_exact(entry, &otuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	} else if (bpf_mprog_total(entry) == bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	if (flags & BPF_F_REPLACE)
		ret = bpf_mprog_replace(entry, entry_new, &ntuple, idx);
	else
		ret = bpf_mprog_insert(entry, entry_new, &ntuple, idx, flags);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

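/* A sketch of the expected caller pattern (illustrative only, not part
 * of this file): the attach point, e.g. tcx, lets mprog compute the new
 * entry, publishes it for its fast path and then commits, which also
 * drops references marked for release. publish_entry() is a stand-in
 * for the subsystem's own update helper:
 *
 *	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, NULL,
 *			       flags, id_or_fd, revision);
 *	if (!ret) {
 *		if (entry != entry_new)
 *			publish_entry(entry_new);
 *		bpf_mprog_commit(entry);
 *	}
 */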

static int bpf_mprog_fetch(struct bpf_mprog_entry *entry,
			   struct bpf_tuple *tuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_cp *cp;
	struct bpf_mprog_fp *fp;
	struct bpf_prog *prog;
	struct bpf_link *link;

	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_read(entry, idx, &fp, &cp);
	prog = READ_ONCE(fp->prog);
	link = cp->link;
	/* The deletion request can either come without a filled tuple, in
	 * which case it gets populated here based on idx, or with a filled
	 * tuple, where the only thing we end up doing is the WARN_ON_ONCE()
	 * asserts. If we hit a BPF link at the given index, it must not be
	 * removed via the opts path.
	 */
	if (link && !tuple->link)
		return -EBUSY;
	WARN_ON_ONCE(tuple->prog && tuple->prog != prog);
	WARN_ON_ONCE(tuple->link && tuple->link != link);
	tuple->prog = prog;
	tuple->link = link;
	return 0;
}

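/* Detach a program/link from the entry. The target is either given
 * directly via prog/link, or selected relative to an existing element
 * through BPF_F_BEFORE/BPF_F_AFTER with an id or fd; without any of
 * these, the last element of the entry is removed.
 */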
int bpf_mprog_detach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog, struct bpf_link *link,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, dtuple = {
		.prog = prog,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (flags & BPF_F_REPLACE)
		return -EINVAL;
	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (!bpf_mprog_total(entry))
		return -ENOENT;
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd, flags,
				       prog ? prog->type :
				       BPF_PROG_TYPE_UNSPEC);
	if (ret)
		return ret;
	if (dtuple.prog) {
		tidx = bpf_mprog_pos_exact(entry, &dtuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	ret = bpf_mprog_fetch(entry, &dtuple, idx);
	if (ret)
		goto out;
	ret = bpf_mprog_delete(entry, entry_new, &dtuple, idx);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

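/* Handle the query (BPF_PROG_QUERY) side: report the entry's current
 * revision and program count, and copy the program/link ids out to user
 * space up to the user-supplied count. Attach flags are always reported
 * as zero here. If the user-supplied array is too small, -ENOSPC is
 * returned after filling what fits.
 */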
int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry)
{
	u32 __user *uprog_flags, *ulink_flags;
	u32 __user *uprog_id, *ulink_id;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *prog;
	const u32 flags = 0;
	u32 id, count = 0;
	u64 revision = 1;
	int i, ret = 0;

	if (attr->query.query_flags || attr->query.attach_flags)
		return -EINVAL;
	if (entry) {
		revision = bpf_mprog_revision(entry);
		count = bpf_mprog_total(entry);
	}
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.count, &count, sizeof(count)))
		return -EFAULT;
	uprog_id = u64_to_user_ptr(attr->query.prog_ids);
	uprog_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	ulink_id = u64_to_user_ptr(attr->query.link_ids);
	ulink_flags = u64_to_user_ptr(attr->query.link_attach_flags);
	if (attr->query.count == 0 || !uprog_id || !count)
		return 0;
	if (attr->query.count < count) {
		count = attr->query.count;
		ret = -ENOSPC;
	}
	for (i = 0; i < bpf_mprog_max(); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		prog = READ_ONCE(fp->prog);
		if (!prog)
			break;
		id = prog->aux->id;
		if (copy_to_user(uprog_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (uprog_flags &&
		    copy_to_user(uprog_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		id = cp->link ? cp->link->id : 0;
		if (ulink_id &&
		    copy_to_user(ulink_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (ulink_flags &&
		    copy_to_user(ulink_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		if (i + 1 == count)
			break;
	}
	return ret;
}