// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...) \
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

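/* Walk the instruction meta list to the entry for @insn_idx, starting from
 * whichever of the cached @meta, the first or the last entry is closest to
 * the target index.
 */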
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
		backward = nfp_prog->n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

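/* Remember the location of the only constant bpf_xdp_adjust_head() call in
 * the program (if there is exactly one within the guaranteed bounds), so
 * the translator can skip the runtime checks for it.
 */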
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* The datapath can usually guarantee how much adjust head can be
	 * done without the need for any checks.  Optimize the simple case
	 * where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

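/* Check the stack-resident value passed to bpf_map_update_elem().  Words
 * which may be non-zero must not back atomic counters in the map value,
 * and any such update is recorded per word for later conflict checks.
 */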
static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct bpf_offloaded_map *offmap;
	struct bpf_func_state *state;
	struct nfp_bpf_map *nfp_map;
	int off, i;

	state = env->cur_state->frame[reg3->frameno];

	/* We need to record each time an update happens with non-zero words,
	 * in case such a word is later used in atomic operations.
	 * Implicitly depends on nfp_bpf_stack_arg_ok(reg3) having run before.
	 */

	offmap = map_to_offmap(reg1->map_ptr);
	nfp_map = offmap->dev_priv;
	off = reg3->off + reg3->var_off.value;

	for (i = 0; i < offmap->map.value_size; i++) {
		struct bpf_stack_state *stack_entry;
		unsigned int soff;

		soff = -(off + i) - 1;
		stack_entry = &state->stack[soff / BPF_REG_SIZE];
		if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
			continue;

		if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
			pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
				i, soff);
			return false;
		}
		nfp_map->use_map[i / 4].non_zero_update = 1;
	}

	return true;
}

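/* Validate a helper argument which must be a constant, 4-byte aligned
 * pointer into the stack.  When the same call is re-verified, also note
 * whether the stack offset differs between paths.
 */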
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* The rest of the checks only apply if we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

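/* Map helpers can only be offloaded if the FW advertises an entry point
 * for the corresponding helper.
 */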
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	return true;
}

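/* Check a call to a BPF helper against FW capabilities and record the
 * argument state the translator will need in the instruction meta.
 */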
static int
nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
			  struct bpf_verifier_env *env,
			  struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_xdp_adjust_tail:
		if (!bpf->adjust_tail) {
			pr_vlog(env, "adjust_tail not supported by FW\n");
			return -EOPNOTSUPP;
		}
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
		    !nfp_bpf_map_update_value_ok(env))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force current CPU to make sure we can report the event
		 * wherever we get the control message from FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* Save space in meta, we don't care about arguments other
		 * than the 4th one, shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn the user that on offload NFP may return success even
		 * if the map is not going to accept the event, since the
		 * event output is fully async and the device won't know the
		 * state of the map.  There is also a FW limitation on the
		 * event length.
		 *
		 * Lost events will not show up on the perf ring, the driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

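/* Exit values are only checked for TC programs: the return code must be a
 * verifier-known constant and only a subset of TC actions is supported.
 */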
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

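/* Stack pointers must have a constant offset.  If the same load/store is
 * seen again with a different offset, the access may still be translated
 * as long as the word alignment does not change.
 */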
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (reg->frameno != env->cur_state->curframe)
		meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED] = "unused",
		[NFP_MAP_USE_READ] = "read",
		[NFP_MAP_USE_WRITE] = "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

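/* Record how one 4-byte word of the map value is used.  Mixing read, write
 * and atomic uses of the same word, or atomics on a word which may have
 * been updated to a non-zero value, cannot be offloaded.
 */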
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4].type != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	if (nfp_map->use_map[off / 4].non_zero_update &&
	    use == NFP_MAP_USE_ATOMIC_CNT) {
		pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
			off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4].type = use;

	return 0;
}

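/* Mark every 4-byte word of the map value touched by this access with the
 * given use type.  The access must be at a constant, in-bounds offset.
 */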
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}

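/* Validate the pointer operand of a memory access and remember its type.
 * Map value pointers additionally get their use (read/atomic) recorded;
 * direct writes to map values are not supported.
 */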
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_atomic(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

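/* Stores to the context are only allowed for the XDP rx_queue_index field,
 * and only if the FW supports queue selection.  Stores through other
 * pointer types go through the generic pointer checks.
 */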
static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		    struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

	if (reg->type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
			/* XDP ctx accesses must be 4B in size */
			switch (meta->insn.off) {
			case offsetof(struct xdp_md, rx_queue_index):
				if (nfp_prog->bpf->queue_select)
					goto exit_check_ptr;
				pr_vlog(env, "queue selection not supported by FW\n");
				return -EOPNOTSUPP;
			}
		}
		pr_vlog(env, "unsupported store to context field\n");
		return -EOPNOTSUPP;
	}
exit_check_ptr:
	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

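/* Only atomic add (BPF_ADD) of a scalar to a map value is supported.
 * Record whether the addend may or must exceed 16 bits so the translator
 * can pick the appropriate add sequence.
 */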
static int
nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		     struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (meta->insn.imm != BPF_ADD) {
		pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
		return -EOPNOTSUPP;
	}

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

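/* Track the value ranges of ALU operands across all verified paths and
 * reject multiplications and divisions the NFP cannot perform (operands
 * wider than u32, non-constant or negative divisors).
 */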
static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg =
		cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg =
		cur_regs(env) + meta->insn.dst_reg;

	meta->umin_src = min(meta->umin_src, sreg->umin_value);
	meta->umax_src = max(meta->umax_src, sreg->umax_value);
	meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
	meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

	/* NFP supports u16 and u32 multiplication.
	 *
	 * For ALU64, if either operand is beyond u32's value range, we
	 * reject it.  One thing to note, if the source operand is BPF_K,
	 * then we need to check the "imm" field directly, and we'd reject
	 * it if it is negative.  This is because for ALU64, "imm" (with s32
	 * type) is expected to be sign extended to s64, which NFP mul
	 * doesn't support.
	 *
	 * For ALU32, it is fine for "imm" to be negative, because the
	 * result is 32-bit and there is no difference in the low half of
	 * the result between signed and unsigned mul, so we will get the
	 * correct result.
	 */
	if (is_mbpf_mul(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "multiplier is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
			pr_vlog(env, "multiplicand is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_class(meta) == BPF_ALU64 &&
		    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
			return -EINVAL;
		}
	}

	/* NFP doesn't have divide instructions, we support divide by constant
	 * through reciprocal multiplication.  Given NFP supports
	 * multiplication no bigger than u32, we require the divisor and
	 * dividend to be no bigger than that as well.
	 *
	 * Also, eBPF doesn't support signed divide and has enforced this at
	 * the C language level by failing compilation.  However the LLVM
	 * assembler hasn't enforced this, so it is possible for a negative
	 * constant to leak in as a BPF_K operand through assembly code; we
	 * reject such cases as well.
	 */
	if (is_mbpf_div(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "dividend is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X) {
			if (meta->umin_src != meta->umax_src) {
				pr_vlog(env, "divisor is not constant\n");
				return -EINVAL;
			}
			if (meta->umax_src > U32_MAX) {
				pr_vlog(env, "divisor is not within u32 value range\n");
				return -EINVAL;
			}
		}
		if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "divide by negative constant is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

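/* Per-instruction verification callback, invoked by the core verifier for
 * each instruction of an offloaded program.
 */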
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (is_mbpf_helper_call(meta))
		return nfp_bpf_check_helper_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_store(nfp_prog, meta, env);

	if (is_mbpf_atomic(meta))
		return nfp_bpf_check_atomic(nfp_prog, meta, env);

	if (is_mbpf_alu(meta))
		return nfp_bpf_check_alu(nfp_prog, meta, env);

	return 0;
}

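/* Assign each instruction to its BPF subprogram and note which subprograms
 * write the callee-saved registers R6-R9 and therefore need them pushed.
 */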
static int
nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
				struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int index = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (nfp_is_subprog_start(meta))
			index++;
		meta->subprog_idx = index;

		if (meta->insn.dst_reg >= BPF_REG_6 &&
		    meta->insn.dst_reg <= BPF_REG_9)
			nfp_prog->subprog[index].needs_reg_push = 1;
	}

	if (index + 1 != nfp_prog->subprog_cnt) {
		pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
			index + 1, nfp_prog->subprog_cnt);
		return -EFAULT;
	}

	return 0;
}

static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
	unsigned int max_depth = 0, depth = 0, frame = 0;
	struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
	unsigned short frame_depths[MAX_CALL_FRAMES];
	unsigned short ret_prog[MAX_CALL_FRAMES];
	unsigned short idx = meta->subprog_idx;

	/* Inspired by check_max_stack_depth() in the kernel verifier.
	 * Starting from the main subprogram, walk all instructions and
	 * recursively walk all callees that a given subprogram can call.
	 * Since recursion is prevented by the kernel verifier, this
	 * algorithm only needs a local stack of MAX_CALL_FRAMES to
	 * remember callsites.
	 */
process_subprog:
	frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
	frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
	depth += frame_depths[frame];
	max_depth = max(max_depth, depth);

continue_subprog:
	for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
	     meta = nfp_meta_next(meta)) {
		if (!is_mbpf_pseudo_call(meta))
			continue;

		/* We found a call to a subprogram. Remember the instruction
		 * to return to and the subprog id.
		 */
		ret_insn[frame] = nfp_meta_next(meta);
		ret_prog[frame] = idx;

		/* Find the callee and start processing it. */
		meta = nfp_bpf_goto_meta(nfp_prog, meta,
					 meta->n + 1 + meta->insn.imm);
		idx = meta->subprog_idx;
		frame++;
		goto process_subprog;
	}
	/* End of the for() loop means the last instruction of the subprog
	 * was reached.  If we popped all stack frames, return; otherwise,
	 * go on processing remaining instructions from the caller.
	 */
	if (frame == 0)
		return max_depth;

	depth -= frame_depths[frame];
	frame--;
	meta = ret_insn[frame];
	idx = ret_prog[frame];
	goto continue_subprog;
}

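/* Propagate the verifier's zero-extension analysis: instructions whose
 * destination the verifier wants zero-extended are flagged for the JIT.
 */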
static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
				   struct bpf_insn_aux_data *aux)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (aux[meta->n].zext_dst)
			meta->flags |= FLAG_INSN_DO_ZEXT;
	}
}

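/* Post-verification hook: size the per-subprogram stack frames, check the
 * total stack usage against the limit advertised by the FW and record the
 * zero-extension requirements.
 */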
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *info;
	struct nfp_prog *nfp_prog;
	unsigned int max_stack;
	struct nfp_net *nn;
	int i;

	nfp_prog = env->prog->aux->offload->dev_priv;
	nfp_prog->subprog_cnt = env->subprog_cnt;
	nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
				    sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
	if (!nfp_prog->subprog)
		return -ENOMEM;

	nfp_assign_subprog_idx_and_regs(env, nfp_prog);

	info = env->subprog_info;
	for (i = 0; i < nfp_prog->subprog_cnt; i++) {
		nfp_prog->subprog[i].stack_depth = info[i].stack_depth;

		if (i == 0)
			continue;

		/* Account for size of return address. */
		nfp_prog->subprog[i].stack_depth += REG_WIDTH;
		/* Account for size of saved registers, if necessary. */
		if (nfp_prog->subprog[i].needs_reg_push)
			nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
	}

	nn = netdev_priv(env->prog->aux->offload->netdev);
	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
	if (nfp_prog->stack_size > max_stack) {
		pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
			nfp_prog->stack_size, max_stack);
		return -EOPNOTSUPP;
	}

	nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
	return 0;
}

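/* Called when the verifier rewrites an instruction in place.  The only
 * rewrite the driver understands is a conditional jump being hard-wired
 * into an unconditional one.
 */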
int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
	nfp_prog->verifier_meta = meta;

	/* conditional jump to jump conversion */
	if (is_mbpf_cond_jump(meta) &&
	    insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
		unsigned int tgt_off;

		tgt_off = off + insn->off + 1;

		if (!insn->off) {
			meta->jmp_dst = list_next_entry(meta, l);
			meta->jump_neg_op = false;
		} else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
			pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
				off, meta->jmp_dst->n,
				aux_data[tgt_off].orig_idx);
			return -EINVAL;
		}
		return 0;
	}

	pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
		meta->insn.code, insn->code);
	return -EINVAL;
}

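/* Called when the verifier removes dead instructions.  The corresponding
 * meta entries are not deleted, only flagged to be skipped during
 * translation.
 */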
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
	unsigned int i;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);

	for (i = 0; i < cnt; i++) {
		if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
			return -EINVAL;

		/* doesn't count if it already has the flag */
		if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
			i--;

		meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
		meta = list_next_entry(meta, l);
	}

	return 0;
}