1 | /* |
2 | * Linux Socket Filter - Kernel level socket filtering |
3 | * |
4 | * Based on the design of the Berkeley Packet Filter. The new |
5 | * internal format has been designed by PLUMgrid: |
6 | * |
7 | * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com |
8 | * |
9 | * Authors: |
10 | * |
11 | * Jay Schulist <jschlst@samba.org> |
12 | * Alexei Starovoitov <ast@plumgrid.com> |
13 | * Daniel Borkmann <dborkman@redhat.com> |
14 | * |
15 | * This program is free software; you can redistribute it and/or |
16 | * modify it under the terms of the GNU General Public License |
17 | * as published by the Free Software Foundation; either version |
18 | * 2 of the License, or (at your option) any later version. |
19 | * |
20 | * Andi Kleen - Fix a few bad bugs and races. |
21 | * Kris Katterjohn - Added many additional checks in bpf_check_classic() |
22 | */ |
23 | |
24 | #include <linux/module.h> |
25 | #include <linux/types.h> |
26 | #include <linux/mm.h> |
27 | #include <linux/fcntl.h> |
28 | #include <linux/socket.h> |
29 | #include <linux/sock_diag.h> |
30 | #include <linux/in.h> |
31 | #include <linux/inet.h> |
32 | #include <linux/netdevice.h> |
33 | #include <linux/if_packet.h> |
34 | #include <linux/if_arp.h> |
35 | #include <linux/gfp.h> |
36 | #include <net/inet_common.h> |
37 | #include <net/ip.h> |
38 | #include <net/protocol.h> |
39 | #include <net/netlink.h> |
40 | #include <linux/skbuff.h> |
41 | #include <linux/skmsg.h> |
42 | #include <net/sock.h> |
43 | #include <net/flow_dissector.h> |
44 | #include <linux/errno.h> |
45 | #include <linux/timer.h> |
46 | #include <linux/uaccess.h> |
47 | #include <asm/unaligned.h> |
48 | #include <asm/cmpxchg.h> |
49 | #include <linux/filter.h> |
50 | #include <linux/ratelimit.h> |
51 | #include <linux/seccomp.h> |
52 | #include <linux/if_vlan.h> |
53 | #include <linux/bpf.h> |
54 | #include <net/sch_generic.h> |
55 | #include <net/cls_cgroup.h> |
56 | #include <net/dst_metadata.h> |
57 | #include <net/dst.h> |
58 | #include <net/sock_reuseport.h> |
59 | #include <net/busy_poll.h> |
60 | #include <net/tcp.h> |
61 | #include <net/xfrm.h> |
62 | #include <net/udp.h> |
63 | #include <linux/bpf_trace.h> |
64 | #include <net/xdp_sock.h> |
65 | #include <linux/inetdevice.h> |
66 | #include <net/inet_hashtables.h> |
67 | #include <net/inet6_hashtables.h> |
68 | #include <net/ip_fib.h> |
69 | #include <net/flow.h> |
70 | #include <net/arp.h> |
71 | #include <net/ipv6.h> |
72 | #include <net/net_namespace.h> |
73 | #include <linux/seg6_local.h> |
74 | #include <net/seg6.h> |
75 | #include <net/seg6_local.h> |
76 | #include <net/lwtunnel.h> |
77 | |
78 | /** |
79 | * sk_filter_trim_cap - run a packet through a socket filter |
80 | * @sk: sock associated with &sk_buff |
81 | * @skb: buffer to filter |
82 | * @cap: limit on how short the eBPF program may trim the packet |
83 | * |
84 | * Run the eBPF program and then cut skb->data to correct size returned by |
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
87 | * wrapper to BPF_PROG_RUN. It returns 0 if the packet should |
88 | * be accepted or -EPERM if the packet should be tossed. |
89 | * |
90 | */ |
91 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) |
92 | { |
93 | int err; |
94 | struct sk_filter *filter; |
95 | |
96 | /* |
97 | * If the skb was allocated from pfmemalloc reserves, only |
98 | * allow SOCK_MEMALLOC sockets to use it as this socket is |
99 | * helping free memory |
100 | */ |
101 | if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { |
102 | NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); |
103 | return -ENOMEM; |
104 | } |
105 | err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); |
106 | if (err) |
107 | return err; |
108 | |
109 | err = security_sock_rcv_skb(sk, skb); |
110 | if (err) |
111 | return err; |
112 | |
113 | rcu_read_lock(); |
114 | filter = rcu_dereference(sk->sk_filter); |
115 | if (filter) { |
116 | struct sock *save_sk = skb->sk; |
117 | unsigned int pkt_len; |
118 | |
119 | skb->sk = sk; |
120 | pkt_len = bpf_prog_run_save_cb(filter->prog, skb); |
121 | skb->sk = save_sk; |
122 | err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; |
123 | } |
124 | rcu_read_unlock(); |
125 | |
126 | return err; |
127 | } |
128 | EXPORT_SYMBOL(sk_filter_trim_cap); |
129 | |
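/* Helpers backing the classic BPF ancillary loads (SKF_AD_PAY_OFFSET,
 * SKF_AD_NLATTR, SKF_AD_NLATTR_NEST, SKF_AD_CPU, ...). They are not eBPF
 * helpers in their own right, but are emitted as BPF_EMIT_CALL targets by
 * convert_bpf_extensions() below.
 */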
130 | BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) |
131 | { |
132 | return skb_get_poff(skb); |
133 | } |
134 | |
135 | BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) |
136 | { |
137 | struct nlattr *nla; |
138 | |
139 | if (skb_is_nonlinear(skb)) |
140 | return 0; |
141 | |
142 | if (skb->len < sizeof(struct nlattr)) |
143 | return 0; |
144 | |
145 | if (a > skb->len - sizeof(struct nlattr)) |
146 | return 0; |
147 | |
148 | nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); |
149 | if (nla) |
150 | return (void *) nla - (void *) skb->data; |
151 | |
152 | return 0; |
153 | } |
154 | |
155 | BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) |
156 | { |
157 | struct nlattr *nla; |
158 | |
159 | if (skb_is_nonlinear(skb)) |
160 | return 0; |
161 | |
162 | if (skb->len < sizeof(struct nlattr)) |
163 | return 0; |
164 | |
165 | if (a > skb->len - sizeof(struct nlattr)) |
166 | return 0; |
167 | |
168 | nla = (struct nlattr *) &skb->data[a]; |
169 | if (nla->nla_len > skb->len - a) |
170 | return 0; |
171 | |
172 | nla = nla_find_nested(nla, x); |
173 | if (nla) |
174 | return (void *) nla - (void *) skb->data; |
175 | |
176 | return 0; |
177 | } |
178 | |
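/* Slow-path loads for converted LD_ABS/LD_IND: @data and @headlen carry the
 * cached skb->data and linear head length, while the *_no_cache variants
 * recompute them from the skb. Negative offsets are handled by
 * bpf_internal_load_pointer_neg_helper() (SKF_NET_OFF/SKF_LL_OFF), and a
 * returned -EFAULT makes the emitted glue terminate the filter with return 0.
 */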
179 | BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, |
180 | data, int, headlen, int, offset) |
181 | { |
182 | u8 tmp, *ptr; |
183 | const int len = sizeof(tmp); |
184 | |
185 | if (offset >= 0) { |
186 | if (headlen - offset >= len) |
187 | return *(u8 *)(data + offset); |
188 | if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) |
189 | return tmp; |
190 | } else { |
191 | ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); |
192 | if (likely(ptr)) |
193 | return *(u8 *)ptr; |
194 | } |
195 | |
196 | return -EFAULT; |
197 | } |
198 | |
199 | BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, |
200 | int, offset) |
201 | { |
202 | return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, |
203 | offset); |
204 | } |
205 | |
206 | BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, |
207 | data, int, headlen, int, offset) |
208 | { |
209 | u16 tmp, *ptr; |
210 | const int len = sizeof(tmp); |
211 | |
212 | if (offset >= 0) { |
213 | if (headlen - offset >= len) |
214 | return get_unaligned_be16(data + offset); |
215 | if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) |
216 | return be16_to_cpu(tmp); |
217 | } else { |
218 | ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); |
219 | if (likely(ptr)) |
220 | return get_unaligned_be16(ptr); |
221 | } |
222 | |
223 | return -EFAULT; |
224 | } |
225 | |
226 | BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, |
227 | int, offset) |
228 | { |
229 | return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, |
230 | offset); |
231 | } |
232 | |
233 | BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, |
234 | data, int, headlen, int, offset) |
235 | { |
236 | u32 tmp, *ptr; |
237 | const int len = sizeof(tmp); |
238 | |
239 | if (likely(offset >= 0)) { |
240 | if (headlen - offset >= len) |
241 | return get_unaligned_be32(data + offset); |
242 | if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) |
243 | return be32_to_cpu(tmp); |
244 | } else { |
245 | ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); |
246 | if (likely(ptr)) |
247 | return get_unaligned_be32(ptr); |
248 | } |
249 | |
250 | return -EFAULT; |
251 | } |
252 | |
253 | BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, |
254 | int, offset) |
255 | { |
256 | return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, |
257 | offset); |
258 | } |
259 | |
260 | BPF_CALL_0(bpf_get_raw_cpu_id) |
261 | { |
262 | return raw_smp_processor_id(); |
263 | } |
264 | |
265 | static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { |
266 | .func = bpf_get_raw_cpu_id, |
267 | .gpl_only = false, |
268 | .ret_type = RET_INTEGER, |
269 | }; |
270 | |
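/* Emit eBPF instructions loading the classic BPF ancillary skb field
 * @skb_field from the skb pointed to by @src_reg into @dst_reg. Returns
 * the number of instructions written to @insn_buf.
 */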
271 | static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, |
272 | struct bpf_insn *insn_buf) |
273 | { |
274 | struct bpf_insn *insn = insn_buf; |
275 | |
276 | switch (skb_field) { |
277 | case SKF_AD_MARK: |
278 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); |
279 | |
280 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
281 | offsetof(struct sk_buff, mark)); |
282 | break; |
283 | |
284 | case SKF_AD_PKTTYPE: |
285 | *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); |
286 | *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); |
287 | #ifdef __BIG_ENDIAN_BITFIELD |
288 | *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); |
289 | #endif |
290 | break; |
291 | |
292 | case SKF_AD_QUEUE: |
293 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); |
294 | |
295 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, |
296 | offsetof(struct sk_buff, queue_mapping)); |
297 | break; |
298 | |
299 | case SKF_AD_VLAN_TAG: |
300 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); |
301 | |
302 | /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ |
303 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, |
304 | offsetof(struct sk_buff, vlan_tci)); |
305 | break; |
306 | case SKF_AD_VLAN_TAG_PRESENT: |
307 | *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); |
308 | if (PKT_VLAN_PRESENT_BIT) |
309 | *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); |
310 | if (PKT_VLAN_PRESENT_BIT < 7) |
311 | *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); |
312 | break; |
313 | } |
314 | |
315 | return insn - insn_buf; |
316 | } |
317 | |
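/* Translate a classic BPF ancillary load (BPF_LD | BPF_ABS with an
 * SKF_AD_OFF based constant) into a native eBPF sequence. Returns false
 * if @fp is not such an extension, so the caller falls back to the
 * regular LD_ABS conversion.
 */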
318 | static bool convert_bpf_extensions(struct sock_filter *fp, |
319 | struct bpf_insn **insnp) |
320 | { |
321 | struct bpf_insn *insn = *insnp; |
322 | u32 cnt; |
323 | |
324 | switch (fp->k) { |
325 | case SKF_AD_OFF + SKF_AD_PROTOCOL: |
326 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); |
327 | |
328 | /* A = *(u16 *) (CTX + offsetof(protocol)) */ |
329 | *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, |
330 | offsetof(struct sk_buff, protocol)); |
331 | /* A = ntohs(A) [emitting a nop or swap16] */ |
332 | *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); |
333 | break; |
334 | |
335 | case SKF_AD_OFF + SKF_AD_PKTTYPE: |
336 | cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); |
337 | insn += cnt - 1; |
338 | break; |
339 | |
340 | case SKF_AD_OFF + SKF_AD_IFINDEX: |
341 | case SKF_AD_OFF + SKF_AD_HATYPE: |
342 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); |
343 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); |
344 | |
345 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), |
346 | BPF_REG_TMP, BPF_REG_CTX, |
347 | offsetof(struct sk_buff, dev)); |
348 | /* if (tmp != 0) goto pc + 1 */ |
349 | *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); |
350 | *insn++ = BPF_EXIT_INSN(); |
351 | if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) |
352 | *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, |
353 | offsetof(struct net_device, ifindex)); |
354 | else |
355 | *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, |
356 | offsetof(struct net_device, type)); |
357 | break; |
358 | |
359 | case SKF_AD_OFF + SKF_AD_MARK: |
360 | cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); |
361 | insn += cnt - 1; |
362 | break; |
363 | |
364 | case SKF_AD_OFF + SKF_AD_RXHASH: |
365 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); |
366 | |
367 | *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, |
368 | offsetof(struct sk_buff, hash)); |
369 | break; |
370 | |
371 | case SKF_AD_OFF + SKF_AD_QUEUE: |
372 | cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); |
373 | insn += cnt - 1; |
374 | break; |
375 | |
376 | case SKF_AD_OFF + SKF_AD_VLAN_TAG: |
377 | cnt = convert_skb_access(SKF_AD_VLAN_TAG, |
378 | BPF_REG_A, BPF_REG_CTX, insn); |
379 | insn += cnt - 1; |
380 | break; |
381 | |
382 | case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: |
383 | cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, |
384 | BPF_REG_A, BPF_REG_CTX, insn); |
385 | insn += cnt - 1; |
386 | break; |
387 | |
388 | case SKF_AD_OFF + SKF_AD_VLAN_TPID: |
389 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); |
390 | |
391 | /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ |
392 | *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, |
393 | offsetof(struct sk_buff, vlan_proto)); |
394 | /* A = ntohs(A) [emitting a nop or swap16] */ |
395 | *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); |
396 | break; |
397 | |
398 | case SKF_AD_OFF + SKF_AD_PAY_OFFSET: |
399 | case SKF_AD_OFF + SKF_AD_NLATTR: |
400 | case SKF_AD_OFF + SKF_AD_NLATTR_NEST: |
401 | case SKF_AD_OFF + SKF_AD_CPU: |
402 | case SKF_AD_OFF + SKF_AD_RANDOM: |
403 | /* arg1 = CTX */ |
404 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); |
405 | /* arg2 = A */ |
406 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); |
407 | /* arg3 = X */ |
408 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); |
409 | /* Emit call(arg1=CTX, arg2=A, arg3=X) */ |
410 | switch (fp->k) { |
411 | case SKF_AD_OFF + SKF_AD_PAY_OFFSET: |
412 | *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); |
413 | break; |
414 | case SKF_AD_OFF + SKF_AD_NLATTR: |
415 | *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); |
416 | break; |
417 | case SKF_AD_OFF + SKF_AD_NLATTR_NEST: |
418 | *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); |
419 | break; |
420 | case SKF_AD_OFF + SKF_AD_CPU: |
421 | *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); |
422 | break; |
423 | case SKF_AD_OFF + SKF_AD_RANDOM: |
424 | *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); |
425 | bpf_user_rnd_init_once(); |
426 | break; |
427 | } |
428 | break; |
429 | |
430 | case SKF_AD_OFF + SKF_AD_ALU_XOR_X: |
431 | /* A ^= X */ |
432 | *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); |
433 | break; |
434 | |
435 | default: |
436 | /* This is just a dummy call to avoid letting the compiler |
437 | * evict __bpf_call_base() as an optimization. Placed here |
438 | * where no-one bothers. |
439 | */ |
440 | BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); |
441 | return false; |
442 | } |
443 | |
444 | *insnp = insn; |
445 | return true; |
446 | } |
447 | |
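/* Convert classic BPF LD_ABS/LD_IND. Where possible, a direct load from
 * the cached skb head (skb->data in BPF_REG_D, headlen in BPF_REG_H, set
 * up by the prologue in bpf_convert_filter()) is emitted as a fast path;
 * otherwise bpf_skb_load_helper_{8,16,32}() is called, and a negative
 * return value terminates the program with return 0.
 */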
448 | static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) |
449 | { |
450 | const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); |
451 | int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); |
452 | bool endian = BPF_SIZE(fp->code) == BPF_H || |
453 | BPF_SIZE(fp->code) == BPF_W; |
454 | bool indirect = BPF_MODE(fp->code) == BPF_IND; |
455 | const int ip_align = NET_IP_ALIGN; |
456 | struct bpf_insn *insn = *insnp; |
457 | int offset = fp->k; |
458 | |
459 | if (!indirect && |
460 | ((unaligned_ok && offset >= 0) || |
461 | (!unaligned_ok && offset >= 0 && |
462 | offset + ip_align >= 0 && |
463 | offset + ip_align % size == 0))) { |
464 | bool ldx_off_ok = offset <= S16_MAX; |
465 | |
466 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); |
467 | if (offset) |
468 | *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); |
469 | *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, |
470 | size, 2 + endian + (!ldx_off_ok * 2)); |
471 | if (ldx_off_ok) { |
472 | *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, |
473 | BPF_REG_D, offset); |
474 | } else { |
475 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); |
476 | *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); |
477 | *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, |
478 | BPF_REG_TMP, 0); |
479 | } |
480 | if (endian) |
481 | *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); |
482 | *insn++ = BPF_JMP_A(8); |
483 | } |
484 | |
485 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); |
486 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); |
487 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); |
488 | if (!indirect) { |
489 | *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); |
490 | } else { |
491 | *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); |
492 | if (fp->k) |
493 | *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); |
494 | } |
495 | |
496 | switch (BPF_SIZE(fp->code)) { |
497 | case BPF_B: |
498 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); |
499 | break; |
500 | case BPF_H: |
501 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); |
502 | break; |
503 | case BPF_W: |
504 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); |
505 | break; |
506 | default: |
507 | return false; |
508 | } |
509 | |
510 | *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); |
511 | *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); |
512 | *insn = BPF_EXIT_INSN(); |
513 | |
514 | *insnp = insn; |
515 | return true; |
516 | } |
517 | |
518 | /** |
519 | * bpf_convert_filter - convert filter program |
520 | * @prog: the user passed filter program |
521 | * @len: the length of the user passed filter program |
522 | * @new_prog: allocated 'struct bpf_prog' or NULL |
523 | * @new_len: pointer to store length of converted program |
524 | * @seen_ld_abs: bool whether we've seen ld_abs/ind |
525 | * |
526 | * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' |
527 | * style extended BPF (eBPF). |
528 | * Conversion workflow: |
529 | * |
530 | * 1) First pass for calculating the new program length: |
531 | * bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs) |
532 | * |
 * 2) Second call to remap in two internal passes: the 1st pass finds new
 *    jump offsets, the 2nd pass does the actual remapping:
535 | * bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs) |
536 | */ |
537 | static int bpf_convert_filter(struct sock_filter *prog, int len, |
538 | struct bpf_prog *new_prog, int *new_len, |
539 | bool *seen_ld_abs) |
540 | { |
541 | int new_flen = 0, pass = 0, target, i, stack_off; |
542 | struct bpf_insn *new_insn, *first_insn = NULL; |
543 | struct sock_filter *fp; |
544 | int *addrs = NULL; |
545 | u8 bpf_src; |
546 | |
547 | BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); |
548 | BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); |
549 | |
550 | if (len <= 0 || len > BPF_MAXINSNS) |
551 | return -EINVAL; |
552 | |
553 | if (new_prog) { |
554 | first_insn = new_prog->insnsi; |
555 | addrs = kcalloc(len, sizeof(*addrs), |
556 | GFP_KERNEL | __GFP_NOWARN); |
557 | if (!addrs) |
558 | return -ENOMEM; |
559 | } |
560 | |
561 | do_pass: |
562 | new_insn = first_insn; |
563 | fp = prog; |
564 | |
565 | /* Classic BPF related prologue emission. */ |
566 | if (new_prog) { |
567 | /* Classic BPF expects A and X to be reset first. These need |
568 | * to be guaranteed to be the first two instructions. |
569 | */ |
570 | *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); |
571 | *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X); |
572 | |
573 | /* All programs must keep CTX in callee saved BPF_REG_CTX. |
574 | * In eBPF case it's done by the compiler, here we need to |
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
576 | */ |
577 | *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); |
578 | if (*seen_ld_abs) { |
579 | /* For packet access in classic BPF, cache skb->data |
580 | * in callee-saved BPF R8 and skb->len - skb->data_len |
581 | * (headlen) in BPF R9. Since classic BPF is read-only |
582 | * on CTX, we only need to cache it once. |
583 | */ |
584 | *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), |
585 | BPF_REG_D, BPF_REG_CTX, |
586 | offsetof(struct sk_buff, data)); |
587 | *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX, |
588 | offsetof(struct sk_buff, len)); |
589 | *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX, |
590 | offsetof(struct sk_buff, data_len)); |
591 | *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP); |
592 | } |
593 | } else { |
594 | new_insn += 3; |
595 | } |
596 | |
597 | for (i = 0; i < len; fp++, i++) { |
598 | struct bpf_insn tmp_insns[32] = { }; |
599 | struct bpf_insn *insn = tmp_insns; |
600 | |
601 | if (addrs) |
602 | addrs[i] = new_insn - first_insn; |
603 | |
604 | switch (fp->code) { |
605 | /* All arithmetic insns and skb loads map as-is. */ |
606 | case BPF_ALU | BPF_ADD | BPF_X: |
607 | case BPF_ALU | BPF_ADD | BPF_K: |
608 | case BPF_ALU | BPF_SUB | BPF_X: |
609 | case BPF_ALU | BPF_SUB | BPF_K: |
610 | case BPF_ALU | BPF_AND | BPF_X: |
611 | case BPF_ALU | BPF_AND | BPF_K: |
612 | case BPF_ALU | BPF_OR | BPF_X: |
613 | case BPF_ALU | BPF_OR | BPF_K: |
614 | case BPF_ALU | BPF_LSH | BPF_X: |
615 | case BPF_ALU | BPF_LSH | BPF_K: |
616 | case BPF_ALU | BPF_RSH | BPF_X: |
617 | case BPF_ALU | BPF_RSH | BPF_K: |
618 | case BPF_ALU | BPF_XOR | BPF_X: |
619 | case BPF_ALU | BPF_XOR | BPF_K: |
620 | case BPF_ALU | BPF_MUL | BPF_X: |
621 | case BPF_ALU | BPF_MUL | BPF_K: |
622 | case BPF_ALU | BPF_DIV | BPF_X: |
623 | case BPF_ALU | BPF_DIV | BPF_K: |
624 | case BPF_ALU | BPF_MOD | BPF_X: |
625 | case BPF_ALU | BPF_MOD | BPF_K: |
626 | case BPF_ALU | BPF_NEG: |
627 | case BPF_LD | BPF_ABS | BPF_W: |
628 | case BPF_LD | BPF_ABS | BPF_H: |
629 | case BPF_LD | BPF_ABS | BPF_B: |
630 | case BPF_LD | BPF_IND | BPF_W: |
631 | case BPF_LD | BPF_IND | BPF_H: |
632 | case BPF_LD | BPF_IND | BPF_B: |
633 | /* Check for overloaded BPF extension and |
634 | * directly convert it if found, otherwise |
635 | * just move on with mapping. |
636 | */ |
637 | if (BPF_CLASS(fp->code) == BPF_LD && |
638 | BPF_MODE(fp->code) == BPF_ABS && |
639 | convert_bpf_extensions(fp, &insn)) |
640 | break; |
641 | if (BPF_CLASS(fp->code) == BPF_LD && |
642 | convert_bpf_ld_abs(fp, &insn)) { |
643 | *seen_ld_abs = true; |
644 | break; |
645 | } |
646 | |
647 | if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || |
648 | fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { |
649 | *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); |
650 | /* Error with exception code on div/mod by 0. |
				 * For cBPF programs, this has always meant returning 0.
652 | */ |
653 | *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); |
654 | *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); |
655 | *insn++ = BPF_EXIT_INSN(); |
656 | } |
657 | |
658 | *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); |
659 | break; |
660 | |
661 | /* Jump transformation cannot use BPF block macros |
662 | * everywhere as offset calculation and target updates |
663 | * require a bit more work than the rest, i.e. jump |
664 | * opcodes map as-is, but offsets need adjustment. |
665 | */ |
666 | |
667 | #define BPF_EMIT_JMP \ |
668 | do { \ |
669 | const s32 off_min = S16_MIN, off_max = S16_MAX; \ |
670 | s32 off; \ |
671 | \ |
672 | if (target >= len || target < 0) \ |
673 | goto err; \ |
674 | off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ |
675 | /* Adjust pc relative offset for 2nd or 3rd insn. */ \ |
676 | off -= insn - tmp_insns; \ |
677 | /* Reject anything not fitting into insn->off. */ \ |
678 | if (off < off_min || off > off_max) \ |
679 | goto err; \ |
680 | insn->off = off; \ |
681 | } while (0) |
682 | |
683 | case BPF_JMP | BPF_JA: |
684 | target = i + fp->k + 1; |
685 | insn->code = fp->code; |
686 | BPF_EMIT_JMP; |
687 | break; |
688 | |
689 | case BPF_JMP | BPF_JEQ | BPF_K: |
690 | case BPF_JMP | BPF_JEQ | BPF_X: |
691 | case BPF_JMP | BPF_JSET | BPF_K: |
692 | case BPF_JMP | BPF_JSET | BPF_X: |
693 | case BPF_JMP | BPF_JGT | BPF_K: |
694 | case BPF_JMP | BPF_JGT | BPF_X: |
695 | case BPF_JMP | BPF_JGE | BPF_K: |
696 | case BPF_JMP | BPF_JGE | BPF_X: |
697 | if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) { |
698 | /* BPF immediates are signed, zero extend |
699 | * immediate into tmp register and use it |
700 | * in compare insn. |
701 | */ |
702 | *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); |
703 | |
704 | insn->dst_reg = BPF_REG_A; |
705 | insn->src_reg = BPF_REG_TMP; |
706 | bpf_src = BPF_X; |
707 | } else { |
708 | insn->dst_reg = BPF_REG_A; |
709 | insn->imm = fp->k; |
710 | bpf_src = BPF_SRC(fp->code); |
711 | insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0; |
712 | } |
713 | |
714 | /* Common case where 'jump_false' is next insn. */ |
715 | if (fp->jf == 0) { |
716 | insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; |
717 | target = i + fp->jt + 1; |
718 | BPF_EMIT_JMP; |
719 | break; |
720 | } |
721 | |
722 | /* Convert some jumps when 'jump_true' is next insn. */ |
723 | if (fp->jt == 0) { |
724 | switch (BPF_OP(fp->code)) { |
725 | case BPF_JEQ: |
726 | insn->code = BPF_JMP | BPF_JNE | bpf_src; |
727 | break; |
728 | case BPF_JGT: |
729 | insn->code = BPF_JMP | BPF_JLE | bpf_src; |
730 | break; |
731 | case BPF_JGE: |
732 | insn->code = BPF_JMP | BPF_JLT | bpf_src; |
733 | break; |
734 | default: |
735 | goto jmp_rest; |
736 | } |
737 | |
738 | target = i + fp->jf + 1; |
739 | BPF_EMIT_JMP; |
740 | break; |
741 | } |
742 | jmp_rest: |
743 | /* Other jumps are mapped into two insns: Jxx and JA. */ |
744 | target = i + fp->jt + 1; |
745 | insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; |
746 | BPF_EMIT_JMP; |
747 | insn++; |
748 | |
749 | insn->code = BPF_JMP | BPF_JA; |
750 | target = i + fp->jf + 1; |
751 | BPF_EMIT_JMP; |
752 | break; |
753 | |
		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
755 | case BPF_LDX | BPF_MSH | BPF_B: { |
756 | struct sock_filter tmp = { |
757 | .code = BPF_LD | BPF_ABS | BPF_B, |
758 | .k = fp->k, |
759 | }; |
760 | |
761 | *seen_ld_abs = true; |
762 | |
763 | /* X = A */ |
764 | *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); |
765 | /* A = BPF_R0 = *(u8 *) (skb->data + K) */ |
766 | convert_bpf_ld_abs(&tmp, &insn); |
767 | insn++; |
768 | /* A &= 0xf */ |
769 | *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); |
770 | /* A <<= 2 */ |
771 | *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); |
772 | /* tmp = X */ |
773 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X); |
774 | /* X = A */ |
775 | *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); |
776 | /* A = tmp */ |
777 | *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); |
778 | break; |
779 | } |
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
781 | * extra mov as BPF_REG_0 is already mapped into BPF_REG_A. |
782 | */ |
783 | case BPF_RET | BPF_A: |
784 | case BPF_RET | BPF_K: |
785 | if (BPF_RVAL(fp->code) == BPF_K) |
786 | *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0, |
787 | 0, fp->k); |
788 | *insn = BPF_EXIT_INSN(); |
789 | break; |
790 | |
791 | /* Store to stack. */ |
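		/* Classic scratch word M[K] lives on the eBPF stack at
		 * FP[-(K * 4 + 4)], i.e. M[0] -> FP[-4], M[1] -> FP[-8], ...
		 */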
792 | case BPF_ST: |
793 | case BPF_STX: |
794 | stack_off = fp->k * 4 + 4; |
795 | *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == |
796 | BPF_ST ? BPF_REG_A : BPF_REG_X, |
797 | -stack_off); |
			/* check_load_and_stores() verifies that classic BPF can
			 * load from the stack only after a write, so tracking
800 | * stack_depth for ST|STX insns is enough |
801 | */ |
802 | if (new_prog && new_prog->aux->stack_depth < stack_off) |
803 | new_prog->aux->stack_depth = stack_off; |
804 | break; |
805 | |
806 | /* Load from stack. */ |
807 | case BPF_LD | BPF_MEM: |
808 | case BPF_LDX | BPF_MEM: |
809 | stack_off = fp->k * 4 + 4; |
810 | *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? |
811 | BPF_REG_A : BPF_REG_X, BPF_REG_FP, |
812 | -stack_off); |
813 | break; |
814 | |
815 | /* A = K or X = K */ |
816 | case BPF_LD | BPF_IMM: |
817 | case BPF_LDX | BPF_IMM: |
818 | *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? |
819 | BPF_REG_A : BPF_REG_X, fp->k); |
820 | break; |
821 | |
822 | /* X = A */ |
823 | case BPF_MISC | BPF_TAX: |
824 | *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); |
825 | break; |
826 | |
827 | /* A = X */ |
828 | case BPF_MISC | BPF_TXA: |
829 | *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); |
830 | break; |
831 | |
832 | /* A = skb->len or X = skb->len */ |
833 | case BPF_LD | BPF_W | BPF_LEN: |
834 | case BPF_LDX | BPF_W | BPF_LEN: |
835 | *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? |
836 | BPF_REG_A : BPF_REG_X, BPF_REG_CTX, |
837 | offsetof(struct sk_buff, len)); |
838 | break; |
839 | |
840 | /* Access seccomp_data fields. */ |
841 | case BPF_LDX | BPF_ABS | BPF_W: |
842 | /* A = *(u32 *) (ctx + K) */ |
843 | *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); |
844 | break; |
845 | |
846 | /* Unknown instruction. */ |
847 | default: |
848 | goto err; |
849 | } |
850 | |
851 | insn++; |
852 | if (new_prog) |
853 | memcpy(new_insn, tmp_insns, |
854 | sizeof(*insn) * (insn - tmp_insns)); |
855 | new_insn += insn - tmp_insns; |
856 | } |
857 | |
858 | if (!new_prog) { |
859 | /* Only calculating new length. */ |
860 | *new_len = new_insn - first_insn; |
861 | if (*seen_ld_abs) |
862 | *new_len += 4; /* Prologue bits. */ |
863 | return 0; |
864 | } |
865 | |
866 | pass++; |
867 | if (new_flen != new_insn - first_insn) { |
868 | new_flen = new_insn - first_insn; |
869 | if (pass > 2) |
870 | goto err; |
871 | goto do_pass; |
872 | } |
873 | |
874 | kfree(addrs); |
875 | BUG_ON(*new_len != new_flen); |
876 | return 0; |
877 | err: |
878 | kfree(addrs); |
879 | return -EINVAL; |
880 | } |
881 | |
882 | /* Security: |
883 | * |
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
888 | */ |
889 | static int check_load_and_stores(const struct sock_filter *filter, int flen) |
890 | { |
891 | u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ |
892 | int pc, ret = 0; |
893 | |
894 | BUILD_BUG_ON(BPF_MEMWORDS > 16); |
895 | |
896 | masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL); |
897 | if (!masks) |
898 | return -ENOMEM; |
899 | |
900 | memset(masks, 0xff, flen * sizeof(*masks)); |
901 | |
902 | for (pc = 0; pc < flen; pc++) { |
903 | memvalid &= masks[pc]; |
904 | |
905 | switch (filter[pc].code) { |
906 | case BPF_ST: |
907 | case BPF_STX: |
908 | memvalid |= (1 << filter[pc].k); |
909 | break; |
910 | case BPF_LD | BPF_MEM: |
911 | case BPF_LDX | BPF_MEM: |
912 | if (!(memvalid & (1 << filter[pc].k))) { |
913 | ret = -EINVAL; |
914 | goto error; |
915 | } |
916 | break; |
917 | case BPF_JMP | BPF_JA: |
918 | /* A jump must set masks on target */ |
919 | masks[pc + 1 + filter[pc].k] &= memvalid; |
920 | memvalid = ~0; |
921 | break; |
922 | case BPF_JMP | BPF_JEQ | BPF_K: |
923 | case BPF_JMP | BPF_JEQ | BPF_X: |
924 | case BPF_JMP | BPF_JGE | BPF_K: |
925 | case BPF_JMP | BPF_JGE | BPF_X: |
926 | case BPF_JMP | BPF_JGT | BPF_K: |
927 | case BPF_JMP | BPF_JGT | BPF_X: |
928 | case BPF_JMP | BPF_JSET | BPF_K: |
929 | case BPF_JMP | BPF_JSET | BPF_X: |
930 | /* A jump must set masks on targets */ |
931 | masks[pc + 1 + filter[pc].jt] &= memvalid; |
932 | masks[pc + 1 + filter[pc].jf] &= memvalid; |
933 | memvalid = ~0; |
934 | break; |
935 | } |
936 | } |
937 | error: |
938 | kfree(masks); |
939 | return ret; |
940 | } |
941 | |
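/* Table of classic BPF opcodes accepted by bpf_check_classic(); anything
 * not listed here is rejected up front.
 */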
942 | static bool chk_code_allowed(u16 code_to_probe) |
943 | { |
944 | static const bool codes[] = { |
945 | /* 32 bit ALU operations */ |
946 | [BPF_ALU | BPF_ADD | BPF_K] = true, |
947 | [BPF_ALU | BPF_ADD | BPF_X] = true, |
948 | [BPF_ALU | BPF_SUB | BPF_K] = true, |
949 | [BPF_ALU | BPF_SUB | BPF_X] = true, |
950 | [BPF_ALU | BPF_MUL | BPF_K] = true, |
951 | [BPF_ALU | BPF_MUL | BPF_X] = true, |
952 | [BPF_ALU | BPF_DIV | BPF_K] = true, |
953 | [BPF_ALU | BPF_DIV | BPF_X] = true, |
954 | [BPF_ALU | BPF_MOD | BPF_K] = true, |
955 | [BPF_ALU | BPF_MOD | BPF_X] = true, |
956 | [BPF_ALU | BPF_AND | BPF_K] = true, |
957 | [BPF_ALU | BPF_AND | BPF_X] = true, |
958 | [BPF_ALU | BPF_OR | BPF_K] = true, |
959 | [BPF_ALU | BPF_OR | BPF_X] = true, |
960 | [BPF_ALU | BPF_XOR | BPF_K] = true, |
961 | [BPF_ALU | BPF_XOR | BPF_X] = true, |
962 | [BPF_ALU | BPF_LSH | BPF_K] = true, |
963 | [BPF_ALU | BPF_LSH | BPF_X] = true, |
964 | [BPF_ALU | BPF_RSH | BPF_K] = true, |
965 | [BPF_ALU | BPF_RSH | BPF_X] = true, |
966 | [BPF_ALU | BPF_NEG] = true, |
967 | /* Load instructions */ |
968 | [BPF_LD | BPF_W | BPF_ABS] = true, |
969 | [BPF_LD | BPF_H | BPF_ABS] = true, |
970 | [BPF_LD | BPF_B | BPF_ABS] = true, |
971 | [BPF_LD | BPF_W | BPF_LEN] = true, |
972 | [BPF_LD | BPF_W | BPF_IND] = true, |
973 | [BPF_LD | BPF_H | BPF_IND] = true, |
974 | [BPF_LD | BPF_B | BPF_IND] = true, |
975 | [BPF_LD | BPF_IMM] = true, |
976 | [BPF_LD | BPF_MEM] = true, |
977 | [BPF_LDX | BPF_W | BPF_LEN] = true, |
978 | [BPF_LDX | BPF_B | BPF_MSH] = true, |
979 | [BPF_LDX | BPF_IMM] = true, |
980 | [BPF_LDX | BPF_MEM] = true, |
981 | /* Store instructions */ |
982 | [BPF_ST] = true, |
983 | [BPF_STX] = true, |
984 | /* Misc instructions */ |
985 | [BPF_MISC | BPF_TAX] = true, |
986 | [BPF_MISC | BPF_TXA] = true, |
987 | /* Return instructions */ |
988 | [BPF_RET | BPF_K] = true, |
989 | [BPF_RET | BPF_A] = true, |
990 | /* Jump instructions */ |
991 | [BPF_JMP | BPF_JA] = true, |
992 | [BPF_JMP | BPF_JEQ | BPF_K] = true, |
993 | [BPF_JMP | BPF_JEQ | BPF_X] = true, |
994 | [BPF_JMP | BPF_JGE | BPF_K] = true, |
995 | [BPF_JMP | BPF_JGE | BPF_X] = true, |
996 | [BPF_JMP | BPF_JGT | BPF_K] = true, |
997 | [BPF_JMP | BPF_JGT | BPF_X] = true, |
998 | [BPF_JMP | BPF_JSET | BPF_K] = true, |
999 | [BPF_JMP | BPF_JSET | BPF_X] = true, |
1000 | }; |
1001 | |
1002 | if (code_to_probe >= ARRAY_SIZE(codes)) |
1003 | return false; |
1004 | |
1005 | return codes[code_to_probe]; |
1006 | } |
1007 | |
1008 | static bool bpf_check_basics_ok(const struct sock_filter *filter, |
1009 | unsigned int flen) |
1010 | { |
1011 | if (filter == NULL) |
1012 | return false; |
1013 | if (flen == 0 || flen > BPF_MAXINSNS) |
1014 | return false; |
1015 | |
1016 | return true; |
1017 | } |
1018 | |
1019 | /** |
1020 | * bpf_check_classic - verify socket filter code |
1021 | * @filter: filter to verify |
1022 | * @flen: length of filter |
1023 | * |
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
1026 | * no references or jumps that are out of range, no illegal |
1027 | * instructions, and must end with a RET instruction. |
1028 | * |
1029 | * All jumps are forward as they are not signed. |
1030 | * |
1031 | * Returns 0 if the rule set is legal or -EINVAL if not. |
1032 | */ |
1033 | static int bpf_check_classic(const struct sock_filter *filter, |
1034 | unsigned int flen) |
1035 | { |
1036 | bool anc_found; |
1037 | int pc; |
1038 | |
1039 | /* Check the filter code now */ |
1040 | for (pc = 0; pc < flen; pc++) { |
1041 | const struct sock_filter *ftest = &filter[pc]; |
1042 | |
1043 | /* May we actually operate on this code? */ |
1044 | if (!chk_code_allowed(ftest->code)) |
1045 | return -EINVAL; |
1046 | |
1047 | /* Some instructions need special checks */ |
1048 | switch (ftest->code) { |
1049 | case BPF_ALU | BPF_DIV | BPF_K: |
1050 | case BPF_ALU | BPF_MOD | BPF_K: |
1051 | /* Check for division by zero */ |
1052 | if (ftest->k == 0) |
1053 | return -EINVAL; |
1054 | break; |
1055 | case BPF_ALU | BPF_LSH | BPF_K: |
1056 | case BPF_ALU | BPF_RSH | BPF_K: |
1057 | if (ftest->k >= 32) |
1058 | return -EINVAL; |
1059 | break; |
1060 | case BPF_LD | BPF_MEM: |
1061 | case BPF_LDX | BPF_MEM: |
1062 | case BPF_ST: |
1063 | case BPF_STX: |
1064 | /* Check for invalid memory addresses */ |
1065 | if (ftest->k >= BPF_MEMWORDS) |
1066 | return -EINVAL; |
1067 | break; |
1068 | case BPF_JMP | BPF_JA: |
1069 | /* Note, the large ftest->k might cause loops. |
1070 | * Compare this with conditional jumps below, |
1071 | * where offsets are limited. --ANK (981016) |
1072 | */ |
1073 | if (ftest->k >= (unsigned int)(flen - pc - 1)) |
1074 | return -EINVAL; |
1075 | break; |
1076 | case BPF_JMP | BPF_JEQ | BPF_K: |
1077 | case BPF_JMP | BPF_JEQ | BPF_X: |
1078 | case BPF_JMP | BPF_JGE | BPF_K: |
1079 | case BPF_JMP | BPF_JGE | BPF_X: |
1080 | case BPF_JMP | BPF_JGT | BPF_K: |
1081 | case BPF_JMP | BPF_JGT | BPF_X: |
1082 | case BPF_JMP | BPF_JSET | BPF_K: |
1083 | case BPF_JMP | BPF_JSET | BPF_X: |
1084 | /* Both conditionals must be safe */ |
1085 | if (pc + ftest->jt + 1 >= flen || |
1086 | pc + ftest->jf + 1 >= flen) |
1087 | return -EINVAL; |
1088 | break; |
1089 | case BPF_LD | BPF_W | BPF_ABS: |
1090 | case BPF_LD | BPF_H | BPF_ABS: |
1091 | case BPF_LD | BPF_B | BPF_ABS: |
1092 | anc_found = false; |
1093 | if (bpf_anc_helper(ftest) & BPF_ANC) |
1094 | anc_found = true; |
1095 | /* Ancillary operation unknown or unsupported */ |
1096 | if (anc_found == false && ftest->k >= SKF_AD_OFF) |
1097 | return -EINVAL; |
1098 | } |
1099 | } |
1100 | |
1101 | /* Last instruction must be a RET code */ |
1102 | switch (filter[flen - 1].code) { |
1103 | case BPF_RET | BPF_K: |
1104 | case BPF_RET | BPF_A: |
1105 | return check_load_and_stores(filter, flen); |
1106 | } |
1107 | |
1108 | return -EINVAL; |
1109 | } |
1110 | |
1111 | static int bpf_prog_store_orig_filter(struct bpf_prog *fp, |
1112 | const struct sock_fprog *fprog) |
1113 | { |
1114 | unsigned int fsize = bpf_classic_proglen(fprog); |
1115 | struct sock_fprog_kern *fkprog; |
1116 | |
1117 | fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL); |
1118 | if (!fp->orig_prog) |
1119 | return -ENOMEM; |
1120 | |
1121 | fkprog = fp->orig_prog; |
1122 | fkprog->len = fprog->len; |
1123 | |
1124 | fkprog->filter = kmemdup(fp->insns, fsize, |
1125 | GFP_KERNEL | __GFP_NOWARN); |
1126 | if (!fkprog->filter) { |
1127 | kfree(fp->orig_prog); |
1128 | return -ENOMEM; |
1129 | } |
1130 | |
1131 | return 0; |
1132 | } |
1133 | |
1134 | static void bpf_release_orig_filter(struct bpf_prog *fp) |
1135 | { |
1136 | struct sock_fprog_kern *fprog = fp->orig_prog; |
1137 | |
1138 | if (fprog) { |
1139 | kfree(fprog->filter); |
1140 | kfree(fprog); |
1141 | } |
1142 | } |
1143 | |
1144 | static void __bpf_prog_release(struct bpf_prog *prog) |
1145 | { |
1146 | if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) { |
1147 | bpf_prog_put(prog); |
1148 | } else { |
1149 | bpf_release_orig_filter(prog); |
1150 | bpf_prog_free(prog); |
1151 | } |
1152 | } |
1153 | |
1154 | static void __sk_filter_release(struct sk_filter *fp) |
1155 | { |
1156 | __bpf_prog_release(fp->prog); |
1157 | kfree(fp); |
1158 | } |
1159 | |
1160 | /** |
1161 | * sk_filter_release_rcu - Release a socket filter by rcu_head |
1162 | * @rcu: rcu_head that contains the sk_filter to free |
1163 | */ |
1164 | static void sk_filter_release_rcu(struct rcu_head *rcu) |
1165 | { |
1166 | struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); |
1167 | |
1168 | __sk_filter_release(fp); |
1169 | } |
1170 | |
1171 | /** |
1172 | * sk_filter_release - release a socket filter |
1173 | * @fp: filter to remove |
1174 | * |
1175 | * Remove a filter from a socket and release its resources. |
1176 | */ |
1177 | static void sk_filter_release(struct sk_filter *fp) |
1178 | { |
1179 | if (refcount_dec_and_test(&fp->refcnt)) |
1180 | call_rcu(&fp->rcu, sk_filter_release_rcu); |
1181 | } |
1182 | |
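/* Uncharge the filter's memory from the socket and drop the reference
 * taken when it was attached or charged.
 */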
1183 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) |
1184 | { |
1185 | u32 filter_size = bpf_prog_size(fp->prog->len); |
1186 | |
1187 | atomic_sub(filter_size, &sk->sk_omem_alloc); |
1188 | sk_filter_release(fp); |
1189 | } |
1190 | |
/* Try to charge the socket memory if there is space available,
 * return true on success.
 */
1194 | static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) |
1195 | { |
1196 | u32 filter_size = bpf_prog_size(fp->prog->len); |
1197 | |
1198 | /* same check as in sock_kmalloc() */ |
1199 | if (filter_size <= sysctl_optmem_max && |
1200 | atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { |
1201 | atomic_add(filter_size, &sk->sk_omem_alloc); |
1202 | return true; |
1203 | } |
1204 | return false; |
1205 | } |
1206 | |
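/* Grab a reference on @fp and charge its memory to @sk; the reference is
 * dropped again if the charge fails.
 */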
1207 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) |
1208 | { |
1209 | if (!refcount_inc_not_zero(&fp->refcnt)) |
1210 | return false; |
1211 | |
1212 | if (!__sk_filter_charge(sk, fp)) { |
1213 | sk_filter_release(fp); |
1214 | return false; |
1215 | } |
1216 | return true; |
1217 | } |
1218 | |
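/* Migrate a classic BPF filter that could not be JITed into the eBPF
 * instruction set so it can run in the eBPF interpreter: the first
 * bpf_convert_filter() pass computes the new length, the prog is
 * reallocated accordingly, and the second pass does the actual remapping.
 */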
1219 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) |
1220 | { |
1221 | struct sock_filter *old_prog; |
1222 | struct bpf_prog *old_fp; |
1223 | int err, new_len, old_len = fp->len; |
1224 | bool seen_ld_abs = false; |
1225 | |
	/* We are free to overwrite insns et al right here as they
	 * won't be used internally anymore at this point in time
1228 | * after the migration to the internal BPF instruction |
1229 | * representation. |
1230 | */ |
1231 | BUILD_BUG_ON(sizeof(struct sock_filter) != |
1232 | sizeof(struct bpf_insn)); |
1233 | |
1234 | /* Conversion cannot happen on overlapping memory areas, |
1235 | * so we need to keep the user BPF around until the 2nd |
1236 | * pass. At this time, the user BPF is stored in fp->insns. |
1237 | */ |
1238 | old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), |
1239 | GFP_KERNEL | __GFP_NOWARN); |
1240 | if (!old_prog) { |
1241 | err = -ENOMEM; |
1242 | goto out_err; |
1243 | } |
1244 | |
1245 | /* 1st pass: calculate the new program length. */ |
1246 | err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, |
1247 | &seen_ld_abs); |
1248 | if (err) |
1249 | goto out_err_free; |
1250 | |
1251 | /* Expand fp for appending the new filter representation. */ |
1252 | old_fp = fp; |
1253 | fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); |
1254 | if (!fp) { |
1255 | /* The old_fp is still around in case we couldn't |
1256 | * allocate new memory, so uncharge on that one. |
1257 | */ |
1258 | fp = old_fp; |
1259 | err = -ENOMEM; |
1260 | goto out_err_free; |
1261 | } |
1262 | |
1263 | fp->len = new_len; |
1264 | |
1265 | /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ |
1266 | err = bpf_convert_filter(old_prog, old_len, fp, &new_len, |
1267 | &seen_ld_abs); |
1268 | if (err) |
		/* The 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory; the remapping itself must succeed.
		 * Note that at this time old_fp has already been released
		 * by krealloc().
1273 | */ |
1274 | goto out_err_free; |
1275 | |
1276 | fp = bpf_prog_select_runtime(fp, &err); |
1277 | if (err) |
1278 | goto out_err_free; |
1279 | |
1280 | kfree(old_prog); |
1281 | return fp; |
1282 | |
1283 | out_err_free: |
1284 | kfree(old_prog); |
1285 | out_err: |
1286 | __bpf_prog_release(fp); |
1287 | return ERR_PTR(err); |
1288 | } |
1289 | |
1290 | static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, |
1291 | bpf_aux_classic_check_t trans) |
1292 | { |
1293 | int err; |
1294 | |
1295 | fp->bpf_func = NULL; |
1296 | fp->jited = 0; |
1297 | |
1298 | err = bpf_check_classic(fp->insns, fp->len); |
1299 | if (err) { |
1300 | __bpf_prog_release(fp); |
1301 | return ERR_PTR(err); |
1302 | } |
1303 | |
1304 | /* There might be additional checks and transformations |
1305 | * needed on classic filters, f.e. in case of seccomp. |
1306 | */ |
1307 | if (trans) { |
1308 | err = trans(fp->insns, fp->len); |
1309 | if (err) { |
1310 | __bpf_prog_release(fp); |
1311 | return ERR_PTR(err); |
1312 | } |
1313 | } |
1314 | |
1315 | /* Probe if we can JIT compile the filter and if so, do |
1316 | * the compilation of the filter. |
1317 | */ |
1318 | bpf_jit_compile(fp); |
1319 | |
1320 | /* JIT compiler couldn't process this filter, so do the |
1321 | * internal BPF translation for the optimized interpreter. |
1322 | */ |
1323 | if (!fp->jited) |
1324 | fp = bpf_migrate_filter(fp); |
1325 | |
1326 | return fp; |
1327 | } |
1328 | |
1329 | /** |
1330 | * bpf_prog_create - create an unattached filter |
1331 | * @pfp: the unattached filter that is created |
1332 | * @fprog: the filter program |
1333 | * |
1334 | * Create a filter independent of any socket. We first run some |
1335 | * sanity checks on it to make sure it does not explode on us later. |
1336 | * If an error occurs or there is insufficient memory for the filter |
1337 | * a negative errno code is returned. On success the return is zero. |
1338 | */ |
1339 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) |
1340 | { |
1341 | unsigned int fsize = bpf_classic_proglen(fprog); |
1342 | struct bpf_prog *fp; |
1343 | |
1344 | /* Make sure new filter is there and in the right amounts. */ |
1345 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
1346 | return -EINVAL; |
1347 | |
1348 | fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
1349 | if (!fp) |
1350 | return -ENOMEM; |
1351 | |
1352 | memcpy(fp->insns, fprog->filter, fsize); |
1353 | |
1354 | fp->len = fprog->len; |
1355 | /* Since unattached filters are not copied back to user |
1356 | * space through sk_get_filter(), we do not need to hold |
	 * a copy here and can spare ourselves the work.
1358 | */ |
1359 | fp->orig_prog = NULL; |
1360 | |
1361 | /* bpf_prepare_filter() already takes care of freeing |
1362 | * memory in case something goes wrong. |
1363 | */ |
1364 | fp = bpf_prepare_filter(fp, NULL); |
1365 | if (IS_ERR(fp)) |
1366 | return PTR_ERR(fp); |
1367 | |
1368 | *pfp = fp; |
1369 | return 0; |
1370 | } |
1371 | EXPORT_SYMBOL_GPL(bpf_prog_create); |
1372 | |
1373 | /** |
1374 | * bpf_prog_create_from_user - create an unattached filter from user buffer |
1375 | * @pfp: the unattached filter that is created |
1376 | * @fprog: the filter program |
1377 | * @trans: post-classic verifier transformation handler |
1378 | * @save_orig: save classic BPF program |
1379 | * |
1380 | * This function effectively does the same as bpf_prog_create(), only |
1381 | * that it builds up its insns buffer from user space provided buffer. |
1382 | * It also allows for passing a bpf_aux_classic_check_t handler. |
1383 | */ |
1384 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, |
1385 | bpf_aux_classic_check_t trans, bool save_orig) |
1386 | { |
1387 | unsigned int fsize = bpf_classic_proglen(fprog); |
1388 | struct bpf_prog *fp; |
1389 | int err; |
1390 | |
1391 | /* Make sure new filter is there and in the right amounts. */ |
1392 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
1393 | return -EINVAL; |
1394 | |
1395 | fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
1396 | if (!fp) |
1397 | return -ENOMEM; |
1398 | |
1399 | if (copy_from_user(fp->insns, fprog->filter, fsize)) { |
1400 | __bpf_prog_free(fp); |
1401 | return -EFAULT; |
1402 | } |
1403 | |
1404 | fp->len = fprog->len; |
1405 | fp->orig_prog = NULL; |
1406 | |
1407 | if (save_orig) { |
1408 | err = bpf_prog_store_orig_filter(fp, fprog); |
1409 | if (err) { |
1410 | __bpf_prog_free(fp); |
1411 | return -ENOMEM; |
1412 | } |
1413 | } |
1414 | |
1415 | /* bpf_prepare_filter() already takes care of freeing |
1416 | * memory in case something goes wrong. |
1417 | */ |
1418 | fp = bpf_prepare_filter(fp, trans); |
1419 | if (IS_ERR(fp)) |
1420 | return PTR_ERR(fp); |
1421 | |
1422 | *pfp = fp; |
1423 | return 0; |
1424 | } |
1425 | EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); |
1426 | |
1427 | void bpf_prog_destroy(struct bpf_prog *fp) |
1428 | { |
1429 | __bpf_prog_release(fp); |
1430 | } |
1431 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); |
1432 | |
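/* Charge @prog's memory to @sk and install it as sk->sk_filter, replacing
 * and uncharging any previously attached filter. Called with the socket
 * lock held, see the lockdep_sock_is_held() annotation below.
 */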
1433 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) |
1434 | { |
1435 | struct sk_filter *fp, *old_fp; |
1436 | |
1437 | fp = kmalloc(sizeof(*fp), GFP_KERNEL); |
1438 | if (!fp) |
1439 | return -ENOMEM; |
1440 | |
1441 | fp->prog = prog; |
1442 | |
1443 | if (!__sk_filter_charge(sk, fp)) { |
1444 | kfree(fp); |
1445 | return -ENOMEM; |
1446 | } |
1447 | refcount_set(&fp->refcnt, 1); |
1448 | |
1449 | old_fp = rcu_dereference_protected(sk->sk_filter, |
1450 | lockdep_sock_is_held(sk)); |
1451 | rcu_assign_pointer(sk->sk_filter, fp); |
1452 | |
1453 | if (old_fp) |
1454 | sk_filter_uncharge(sk, old_fp); |
1455 | |
1456 | return 0; |
1457 | } |
1458 | |
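/* Build a bpf_prog from a user supplied classic sock_fprog: copy the insns
 * from user space, keep the original program around for sk_get_filter(),
 * and run the result through bpf_prepare_filter().
 */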
1459 | static |
1460 | struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) |
1461 | { |
1462 | unsigned int fsize = bpf_classic_proglen(fprog); |
1463 | struct bpf_prog *prog; |
1464 | int err; |
1465 | |
1466 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
1467 | return ERR_PTR(-EPERM); |
1468 | |
1469 | /* Make sure new filter is there and in the right amounts. */ |
1470 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
1471 | return ERR_PTR(-EINVAL); |
1472 | |
1473 | prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
1474 | if (!prog) |
1475 | return ERR_PTR(-ENOMEM); |
1476 | |
1477 | if (copy_from_user(prog->insns, fprog->filter, fsize)) { |
1478 | __bpf_prog_free(prog); |
1479 | return ERR_PTR(-EFAULT); |
1480 | } |
1481 | |
1482 | prog->len = fprog->len; |
1483 | |
1484 | err = bpf_prog_store_orig_filter(prog, fprog); |
1485 | if (err) { |
1486 | __bpf_prog_free(prog); |
1487 | return ERR_PTR(-ENOMEM); |
1488 | } |
1489 | |
1490 | /* bpf_prepare_filter() already takes care of freeing |
1491 | * memory in case something goes wrong. |
1492 | */ |
1493 | return bpf_prepare_filter(prog, NULL); |
1494 | } |
1495 | |
1496 | /** |
1497 | * sk_attach_filter - attach a socket filter |
1498 | * @fprog: the filter program |
1499 | * @sk: the socket to use |
1500 | * |
1501 | * Attach the user's filter code. We first run some sanity checks on |
1502 | * it to make sure it does not explode on us later. If an error |
1503 | * occurs or there is insufficient memory for the filter a negative |
1504 | * errno code is returned. On success the return is zero. |
1505 | */ |
1506 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
1507 | { |
1508 | struct bpf_prog *prog = __get_filter(fprog, sk); |
1509 | int err; |
1510 | |
1511 | if (IS_ERR(prog)) |
1512 | return PTR_ERR(prog); |
1513 | |
1514 | err = __sk_attach_prog(prog, sk); |
1515 | if (err < 0) { |
1516 | __bpf_prog_release(prog); |
1517 | return err; |
1518 | } |
1519 | |
1520 | return 0; |
1521 | } |
1522 | EXPORT_SYMBOL_GPL(sk_attach_filter); |
1523 | |
1524 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
1525 | { |
1526 | struct bpf_prog *prog = __get_filter(fprog, sk); |
1527 | int err; |
1528 | |
1529 | if (IS_ERR(prog)) |
1530 | return PTR_ERR(prog); |
1531 | |
1532 | if (bpf_prog_size(prog->len) > sysctl_optmem_max) |
1533 | err = -ENOMEM; |
1534 | else |
1535 | err = reuseport_attach_prog(sk, prog); |
1536 | |
1537 | if (err) |
1538 | __bpf_prog_release(prog); |
1539 | |
1540 | return err; |
1541 | } |
1542 | |
1543 | static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) |
1544 | { |
1545 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
1546 | return ERR_PTR(-EPERM); |
1547 | |
1548 | return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); |
1549 | } |
1550 | |
1551 | int sk_attach_bpf(u32 ufd, struct sock *sk) |
1552 | { |
1553 | struct bpf_prog *prog = __get_bpf(ufd, sk); |
1554 | int err; |
1555 | |
1556 | if (IS_ERR(prog)) |
1557 | return PTR_ERR(prog); |
1558 | |
1559 | err = __sk_attach_prog(prog, sk); |
1560 | if (err < 0) { |
1561 | bpf_prog_put(prog); |
1562 | return err; |
1563 | } |
1564 | |
1565 | return 0; |
1566 | } |
1567 | |
1568 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) |
1569 | { |
1570 | struct bpf_prog *prog; |
1571 | int err; |
1572 | |
1573 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
1574 | return -EPERM; |
1575 | |
1576 | prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); |
1577 | if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL) |
1578 | prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); |
1579 | if (IS_ERR(prog)) |
1580 | return PTR_ERR(prog); |
1581 | |
1582 | if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { |
		/* Like other non-BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf progs (e.g. sockmap), this relies on the
		 * limits imposed by bpf_prog_load().
1586 | * Hence, sysctl_optmem_max is not checked. |
1587 | */ |
1588 | if ((sk->sk_type != SOCK_STREAM && |
1589 | sk->sk_type != SOCK_DGRAM) || |
1590 | (sk->sk_protocol != IPPROTO_UDP && |
1591 | sk->sk_protocol != IPPROTO_TCP) || |
1592 | (sk->sk_family != AF_INET && |
1593 | sk->sk_family != AF_INET6)) { |
1594 | err = -ENOTSUPP; |
1595 | goto err_prog_put; |
1596 | } |
1597 | } else { |
1598 | /* BPF_PROG_TYPE_SOCKET_FILTER */ |
1599 | if (bpf_prog_size(prog->len) > sysctl_optmem_max) { |
1600 | err = -ENOMEM; |
1601 | goto err_prog_put; |
1602 | } |
1603 | } |
1604 | |
1605 | err = reuseport_attach_prog(sk, prog); |
1606 | err_prog_put: |
1607 | if (err) |
1608 | bpf_prog_put(prog); |
1609 | |
1610 | return err; |
1611 | } |
1612 | |
1613 | void sk_reuseport_prog_free(struct bpf_prog *prog) |
1614 | { |
1615 | if (!prog) |
1616 | return; |
1617 | |
1618 | if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) |
1619 | bpf_prog_put(prog); |
1620 | else |
1621 | bpf_prog_destroy(prog); |
1622 | } |
1623 | |
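/* Per-CPU scratch area for helpers that need a temporary buffer of up to
 * MAX_BPF_STACK bytes, e.g. for checksum diffing.
 */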
1624 | struct bpf_scratchpad { |
1625 | union { |
1626 | __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; |
1627 | u8 buff[MAX_BPF_STACK]; |
1628 | }; |
1629 | }; |
1630 | |
1631 | static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); |
1632 | |
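/* Direct packet writes need a private, linear chunk of skb data:
 * skb_ensure_writable() unclones/copies the head as required so that
 * @write_len bytes at skb->data may be written, and the wrappers below
 * recompute the cached data/data_end pointers afterwards.
 */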
1633 | static inline int __bpf_try_make_writable(struct sk_buff *skb, |
1634 | unsigned int write_len) |
1635 | { |
1636 | return skb_ensure_writable(skb, write_len); |
1637 | } |
1638 | |
1639 | static inline int bpf_try_make_writable(struct sk_buff *skb, |
1640 | unsigned int write_len) |
1641 | { |
1642 | int err = __bpf_try_make_writable(skb, write_len); |
1643 | |
1644 | bpf_compute_data_pointers(skb); |
1645 | return err; |
1646 | } |
1647 | |
1648 | static int bpf_try_make_head_writable(struct sk_buff *skb) |
1649 | { |
1650 | return bpf_try_make_writable(skb, skb_headlen(skb)); |
1651 | } |
1652 | |
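/* Keep a CHECKSUM_COMPLETE skb->csum consistent when helpers add or remove
 * the MAC header bytes from the checksummed area; only relevant at tc
 * ingress, where the MAC header has already been pulled.
 */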
1653 | static inline void bpf_push_mac_rcsum(struct sk_buff *skb) |
1654 | { |
1655 | if (skb_at_tc_ingress(skb)) |
1656 | skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); |
1657 | } |
1658 | |
1659 | static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) |
1660 | { |
1661 | if (skb_at_tc_ingress(skb)) |
1662 | skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); |
1663 | } |
1664 | |
1665 | BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, |
1666 | const void *, from, u32, len, u64, flags) |
1667 | { |
1668 | void *ptr; |
1669 | |
1670 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) |
1671 | return -EINVAL; |
1672 | if (unlikely(offset > 0xffff)) |
1673 | return -EFAULT; |
1674 | if (unlikely(bpf_try_make_writable(skb, offset + len))) |
1675 | return -EFAULT; |
1676 | |
1677 | ptr = skb->data + offset; |
1678 | if (flags & BPF_F_RECOMPUTE_CSUM) |
1679 | __skb_postpull_rcsum(skb, ptr, len, offset); |
1680 | |
1681 | memcpy(ptr, from, len); |
1682 | |
1683 | if (flags & BPF_F_RECOMPUTE_CSUM) |
1684 | __skb_postpush_rcsum(skb, ptr, len, offset); |
1685 | if (flags & BPF_F_INVALIDATE_HASH) |
1686 | skb_clear_hash(skb); |
1687 | |
1688 | return 0; |
1689 | } |
1690 | |
1691 | static const struct bpf_func_proto bpf_skb_store_bytes_proto = { |
1692 | .func = bpf_skb_store_bytes, |
1693 | .gpl_only = false, |
1694 | .ret_type = RET_INTEGER, |
1695 | .arg1_type = ARG_PTR_TO_CTX, |
1696 | .arg2_type = ARG_ANYTHING, |
1697 | .arg3_type = ARG_PTR_TO_MEM, |
1698 | .arg4_type = ARG_CONST_SIZE, |
1699 | .arg5_type = ARG_ANYTHING, |
1700 | }; |
1701 | |
1702 | BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, |
1703 | void *, to, u32, len) |
1704 | { |
1705 | void *ptr; |
1706 | |
1707 | if (unlikely(offset > 0xffff)) |
1708 | goto err_clear; |
1709 | |
1710 | ptr = skb_header_pointer(skb, offset, len, to); |
1711 | if (unlikely(!ptr)) |
1712 | goto err_clear; |
1713 | if (ptr != to) |
1714 | memcpy(to, ptr, len); |
1715 | |
1716 | return 0; |
1717 | err_clear: |
1718 | memset(to, 0, len); |
1719 | return -EFAULT; |
1720 | } |
1721 | |
1722 | static const struct bpf_func_proto bpf_skb_load_bytes_proto = { |
1723 | .func = bpf_skb_load_bytes, |
1724 | .gpl_only = false, |
1725 | .ret_type = RET_INTEGER, |
1726 | .arg1_type = ARG_PTR_TO_CTX, |
1727 | .arg2_type = ARG_ANYTHING, |
1728 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, |
1729 | .arg4_type = ARG_CONST_SIZE, |
1730 | }; |
1731 | |
1732 | BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, |
	   u32, offset, void *, to, u32, len, u32, start_header)
1734 | { |
1735 | u8 *end = skb_tail_pointer(skb); |
1736 | u8 *net = skb_network_header(skb); |
1737 | u8 *mac = skb_mac_header(skb); |
1738 | u8 *ptr; |
1739 | |
1740 | if (unlikely(offset > 0xffff || len > (end - mac))) |
1741 | goto err_clear; |
1742 | |
1743 | switch (start_header) { |
1744 | case BPF_HDR_START_MAC: |
1745 | ptr = mac + offset; |
1746 | break; |
1747 | case BPF_HDR_START_NET: |
1748 | ptr = net + offset; |
1749 | break; |
1750 | default: |
1751 | goto err_clear; |
1752 | } |
1753 | |
1754 | if (likely(ptr >= mac && ptr + len <= end)) { |
1755 | memcpy(to, ptr, len); |
1756 | return 0; |
1757 | } |
1758 | |
1759 | err_clear: |
1760 | memset(to, 0, len); |
1761 | return -EFAULT; |
1762 | } |
1763 | |
1764 | static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { |
1765 | .func = bpf_skb_load_bytes_relative, |
1766 | .gpl_only = false, |
1767 | .ret_type = RET_INTEGER, |
1768 | .arg1_type = ARG_PTR_TO_CTX, |
1769 | .arg2_type = ARG_ANYTHING, |
1770 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, |
1771 | .arg4_type = ARG_CONST_SIZE, |
1772 | .arg5_type = ARG_ANYTHING, |
1773 | }; |
1774 | |
1775 | BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) |
1776 | { |
	/* Idea is the following: should the needed direct read/write
	 * test fail at runtime, we can pull in more data and redo the
	 * test, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make readable/writable,
	 * this can be done once at the beginning of the program for the
	 * direct access case. By this we overcome the limitation of only
	 * the current headroom being accessible.
1785 | */ |
1786 | return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); |
1787 | } |
1788 | |
1789 | static const struct bpf_func_proto bpf_skb_pull_data_proto = { |
1790 | .func = bpf_skb_pull_data, |
1791 | .gpl_only = false, |
1792 | .ret_type = RET_INTEGER, |
1793 | .arg1_type = ARG_PTR_TO_CTX, |
1794 | .arg2_type = ARG_ANYTHING, |
1795 | }; |
1796 | |
1797 | BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) |
1798 | { |
1799 | sk = sk_to_full_sk(sk); |
1800 | |
1801 | return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; |
1802 | } |
1803 | |
1804 | static const struct bpf_func_proto bpf_sk_fullsock_proto = { |
1805 | .func = bpf_sk_fullsock, |
1806 | .gpl_only = false, |
1807 | .ret_type = RET_PTR_TO_SOCKET_OR_NULL, |
1808 | .arg1_type = ARG_PTR_TO_SOCK_COMMON, |
1809 | }; |
1810 | |
1811 | static inline int sk_skb_try_make_writable(struct sk_buff *skb, |
1812 | unsigned int write_len) |
1813 | { |
1814 | int err = __bpf_try_make_writable(skb, write_len); |
1815 | |
1816 | bpf_compute_data_end_sk_skb(skb); |
1817 | return err; |
1818 | } |
1819 | |
1820 | BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) |
1821 | { |
	/* Idea is the following: should the needed direct read/write
	 * test fail at runtime, we can pull in more data and redo the
	 * test, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make readable/writable,
	 * this can be done once at the beginning of the program for the
	 * direct access case. By this we overcome the limitation of only
	 * the current headroom being accessible.
1830 | */ |
1831 | return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); |
1832 | } |
1833 | |
1834 | static const struct bpf_func_proto sk_skb_pull_data_proto = { |
1835 | .func = sk_skb_pull_data, |
1836 | .gpl_only = false, |
1837 | .ret_type = RET_INTEGER, |
1838 | .arg1_type = ARG_PTR_TO_CTX, |
1839 | .arg2_type = ARG_ANYTHING, |
1840 | }; |
1841 | |
1842 | BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, |
1843 | u64, from, u64, to, u64, flags) |
1844 | { |
1845 | __sum16 *ptr; |
1846 | |
1847 | if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) |
1848 | return -EINVAL; |
1849 | if (unlikely(offset > 0xffff || offset & 1)) |
1850 | return -EFAULT; |
1851 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
1852 | return -EFAULT; |
1853 | |
1854 | ptr = (__sum16 *)(skb->data + offset); |
1855 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
1856 | case 0: |
1857 | if (unlikely(from != 0)) |
1858 | return -EINVAL; |
1859 | |
1860 | csum_replace_by_diff(ptr, to); |
1861 | break; |
1862 | case 2: |
1863 | csum_replace2(ptr, from, to); |
1864 | break; |
1865 | case 4: |
1866 | csum_replace4(ptr, from, to); |
1867 | break; |
1868 | default: |
1869 | return -EINVAL; |
1870 | } |
1871 | |
1872 | return 0; |
1873 | } |
1874 | |
1875 | static const struct bpf_func_proto bpf_l3_csum_replace_proto = { |
1876 | .func = bpf_l3_csum_replace, |
1877 | .gpl_only = false, |
1878 | .ret_type = RET_INTEGER, |
1879 | .arg1_type = ARG_PTR_TO_CTX, |
1880 | .arg2_type = ARG_ANYTHING, |
1881 | .arg3_type = ARG_ANYTHING, |
1882 | .arg4_type = ARG_ANYTHING, |
1883 | .arg5_type = ARG_ANYTHING, |
1884 | }; |
1885 | |
1886 | BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, |
1887 | u64, from, u64, to, u64, flags) |
1888 | { |
1889 | bool is_pseudo = flags & BPF_F_PSEUDO_HDR; |
1890 | bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; |
1891 | bool do_mforce = flags & BPF_F_MARK_ENFORCE; |
1892 | __sum16 *ptr; |
1893 | |
1894 | if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | |
1895 | BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) |
1896 | return -EINVAL; |
1897 | if (unlikely(offset > 0xffff || offset & 1)) |
1898 | return -EFAULT; |
1899 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
1900 | return -EFAULT; |
1901 | |
1902 | ptr = (__sum16 *)(skb->data + offset); |
1903 | if (is_mmzero && !do_mforce && !*ptr) |
1904 | return 0; |
1905 | |
1906 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
1907 | case 0: |
1908 | if (unlikely(from != 0)) |
1909 | return -EINVAL; |
1910 | |
1911 | inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); |
1912 | break; |
1913 | case 2: |
1914 | inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); |
1915 | break; |
1916 | case 4: |
1917 | inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); |
1918 | break; |
1919 | default: |
1920 | return -EINVAL; |
1921 | } |
1922 | |
1923 | if (is_mmzero && !*ptr) |
1924 | *ptr = CSUM_MANGLED_0; |
1925 | return 0; |
1926 | } |
1927 | |
1928 | static const struct bpf_func_proto bpf_l4_csum_replace_proto = { |
1929 | .func = bpf_l4_csum_replace, |
1930 | .gpl_only = false, |
1931 | .ret_type = RET_INTEGER, |
1932 | .arg1_type = ARG_PTR_TO_CTX, |
1933 | .arg2_type = ARG_ANYTHING, |
1934 | .arg3_type = ARG_ANYTHING, |
1935 | .arg4_type = ARG_ANYTHING, |
1936 | .arg5_type = ARG_ANYTHING, |
1937 | }; |
1938 | |
1939 | BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, |
1940 | __be32 *, to, u32, to_size, __wsum, seed) |
1941 | { |
1942 | struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); |
1943 | u32 diff_size = from_size + to_size; |
1944 | int i, j = 0; |
1945 | |
1946 | /* This is quite flexible, some examples: |
1947 | * |
1948 | * from_size == 0, to_size > 0, seed := csum --> pushing data |
1949 | * from_size > 0, to_size == 0, seed := csum --> pulling data |
1950 | * from_size > 0, to_size > 0, seed := 0 --> diffing data |
1951 | * |
1952 | * Even for diffing, from_size and to_size don't need to be equal. |
1953 | */ |
1954 | if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || |
1955 | diff_size > sizeof(sp->diff))) |
1956 | return -EINVAL; |
1957 | |
1958 | for (i = 0; i < from_size / sizeof(__be32); i++, j++) |
1959 | sp->diff[j] = ~from[i]; |
1960 | for (i = 0; i < to_size / sizeof(__be32); i++, j++) |
1961 | sp->diff[j] = to[i]; |
1962 | |
1963 | return csum_partial(sp->diff, diff_size, seed); |
1964 | } |
1965 | |
1966 | static const struct bpf_func_proto bpf_csum_diff_proto = { |
1967 | .func = bpf_csum_diff, |
1968 | .gpl_only = false, |
1969 | .pkt_access = true, |
1970 | .ret_type = RET_INTEGER, |
1971 | .arg1_type = ARG_PTR_TO_MEM_OR_NULL, |
1972 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
1973 | .arg3_type = ARG_PTR_TO_MEM_OR_NULL, |
1974 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, |
1975 | .arg5_type = ARG_ANYTHING, |
1976 | }; |
1977 | |
1978 | BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) |
1979 | { |
1980 | /* The interface is to be used in combination with bpf_csum_diff() |
1981 | * for direct packet writes. csum rotation for alignment as well |
1982 | * as emulating csum_sub() can be done from the eBPF program. |
1983 | */ |
1984 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
1985 | return (skb->csum = csum_add(skb->csum, csum)); |
1986 | |
1987 | return -ENOTSUPP; |
1988 | } |
1989 | |
1990 | static const struct bpf_func_proto bpf_csum_update_proto = { |
1991 | .func = bpf_csum_update, |
1992 | .gpl_only = false, |
1993 | .ret_type = RET_INTEGER, |
1994 | .arg1_type = ARG_PTR_TO_CTX, |
1995 | .arg2_type = ARG_ANYTHING, |
1996 | }; |
1997 | |
1998 | static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) |
1999 | { |
2000 | return dev_forward_skb(dev, skb); |
2001 | } |
2002 | |
2003 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, |
2004 | struct sk_buff *skb) |
2005 | { |
2006 | int ret = ____dev_forward_skb(dev, skb); |
2007 | |
2008 | if (likely(!ret)) { |
2009 | skb->dev = dev; |
2010 | ret = netif_rx(skb); |
2011 | } |
2012 | |
2013 | return ret; |
2014 | } |
2015 | |
2016 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
2017 | { |
2018 | int ret; |
2019 | |
2020 | if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { |
2021 | net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n" ); |
2022 | kfree_skb(skb); |
2023 | return -ENETDOWN; |
2024 | } |
2025 | |
2026 | skb->dev = dev; |
2027 | |
2028 | __this_cpu_inc(xmit_recursion); |
2029 | ret = dev_queue_xmit(skb); |
2030 | __this_cpu_dec(xmit_recursion); |
2031 | |
2032 | return ret; |
2033 | } |
2034 | |
2035 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
2036 | u32 flags) |
2037 | { |
2038 | unsigned int mlen = skb_network_offset(skb); |
2039 | |
2040 | if (mlen) { |
2041 | __skb_pull(skb, mlen); |
2042 | |
2043 | /* At ingress, the mac header has already been pulled once. |
	 * At egress, skb_postpull_rcsum() has to be done in case the
	 * skb originated from ingress (i.e. a forwarded skb) to ensure
	 * that rcsum starts at the net header.
2047 | */ |
2048 | if (!skb_at_tc_ingress(skb)) |
2049 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
2050 | } |
2051 | skb_pop_mac_header(skb); |
2052 | skb_reset_mac_len(skb); |
2053 | return flags & BPF_F_INGRESS ? |
2054 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); |
2055 | } |
2056 | |
2057 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, |
2058 | u32 flags) |
2059 | { |
2060 | /* Verify that a link layer header is carried */ |
2061 | if (unlikely(skb->mac_header >= skb->network_header)) { |
2062 | kfree_skb(skb); |
2063 | return -ERANGE; |
2064 | } |
2065 | |
2066 | bpf_push_mac_rcsum(skb); |
2067 | return flags & BPF_F_INGRESS ? |
2068 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); |
2069 | } |
2070 | |
2071 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, |
2072 | u32 flags) |
2073 | { |
2074 | if (dev_is_mac_header_xmit(dev)) |
2075 | return __bpf_redirect_common(skb, dev, flags); |
2076 | else |
2077 | return __bpf_redirect_no_mac(skb, dev, flags); |
2078 | } |
2079 | |
2080 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
2081 | { |
2082 | struct net_device *dev; |
2083 | struct sk_buff *clone; |
2084 | int ret; |
2085 | |
2086 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
2087 | return -EINVAL; |
2088 | |
2089 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); |
2090 | if (unlikely(!dev)) |
2091 | return -EINVAL; |
2092 | |
2093 | clone = skb_clone(skb, GFP_ATOMIC); |
2094 | if (unlikely(!clone)) |
2095 | return -ENOMEM; |
2096 | |
	/* For direct writes, we need to keep the invariant that the skbs
	 * we're dealing with are uncloned. Should uncloning fail here,
	 * we need to free the just generated clone again.
	 */
2102 | ret = bpf_try_make_head_writable(skb); |
2103 | if (unlikely(ret)) { |
2104 | kfree_skb(clone); |
2105 | return -ENOMEM; |
2106 | } |
2107 | |
2108 | return __bpf_redirect(clone, dev, flags); |
2109 | } |
2110 | |
2111 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
2112 | .func = bpf_clone_redirect, |
2113 | .gpl_only = false, |
2114 | .ret_type = RET_INTEGER, |
2115 | .arg1_type = ARG_PTR_TO_CTX, |
2116 | .arg2_type = ARG_ANYTHING, |
2117 | .arg3_type = ARG_ANYTHING, |
2118 | }; |
2119 | |
2120 | DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); |
2121 | EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); |
2122 | |
2123 | BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) |
2124 | { |
2125 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
2126 | |
2127 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
2128 | return TC_ACT_SHOT; |
2129 | |
2130 | ri->ifindex = ifindex; |
2131 | ri->flags = flags; |
2132 | |
2133 | return TC_ACT_REDIRECT; |
2134 | } |
2135 | |
2136 | int skb_do_redirect(struct sk_buff *skb) |
2137 | { |
2138 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
2139 | struct net_device *dev; |
2140 | |
2141 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); |
2142 | ri->ifindex = 0; |
2143 | if (unlikely(!dev)) { |
2144 | kfree_skb(skb); |
2145 | return -EINVAL; |
2146 | } |
2147 | |
2148 | return __bpf_redirect(skb, dev, ri->flags); |
2149 | } |
2150 | |
2151 | static const struct bpf_func_proto bpf_redirect_proto = { |
2152 | .func = bpf_redirect, |
2153 | .gpl_only = false, |
2154 | .ret_type = RET_INTEGER, |
2155 | .arg1_type = ARG_ANYTHING, |
2156 | .arg2_type = ARG_ANYTHING, |
2157 | }; |
2158 | |
2159 | BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) |
2160 | { |
2161 | msg->apply_bytes = bytes; |
2162 | return 0; |
2163 | } |
2164 | |
2165 | static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { |
2166 | .func = bpf_msg_apply_bytes, |
2167 | .gpl_only = false, |
2168 | .ret_type = RET_INTEGER, |
2169 | .arg1_type = ARG_PTR_TO_CTX, |
2170 | .arg2_type = ARG_ANYTHING, |
2171 | }; |
2172 | |
2173 | BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) |
2174 | { |
2175 | msg->cork_bytes = bytes; |
2176 | return 0; |
2177 | } |
2178 | |
2179 | static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { |
2180 | .func = bpf_msg_cork_bytes, |
2181 | .gpl_only = false, |
2182 | .ret_type = RET_INTEGER, |
2183 | .arg1_type = ARG_PTR_TO_CTX, |
2184 | .arg2_type = ARG_ANYTHING, |
2185 | }; |
2186 | |
2187 | BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, |
2188 | u32, end, u64, flags) |
2189 | { |
2190 | u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; |
2191 | u32 first_sge, last_sge, i, shift, bytes_sg_total; |
2192 | struct scatterlist *sge; |
2193 | u8 *raw, *to, *from; |
2194 | struct page *page; |
2195 | |
2196 | if (unlikely(flags || end <= start)) |
2197 | return -EINVAL; |
2198 | |
2199 | /* First find the starting scatterlist element */ |
2200 | i = msg->sg.start; |
2201 | do { |
2202 | len = sk_msg_elem(msg, i)->length; |
2203 | if (start < offset + len) |
2204 | break; |
2205 | offset += len; |
2206 | sk_msg_iter_var_next(i); |
2207 | } while (i != msg->sg.end); |
2208 | |
2209 | if (unlikely(start >= offset + len)) |
2210 | return -EINVAL; |
2211 | |
2212 | first_sge = i; |
2213 | /* The start may point into the sg element so we need to also |
2214 | * account for the headroom. |
2215 | */ |
2216 | bytes_sg_total = start - offset + bytes; |
2217 | if (!msg->sg.copy[i] && bytes_sg_total <= len) |
2218 | goto out; |
2219 | |
2220 | /* At this point we need to linearize multiple scatterlist |
2221 | * elements or a single shared page. Either way we need to |
2222 | * copy into a linear buffer exclusively owned by BPF. Then |
2223 | * place the buffer in the scatterlist and fixup the original |
2224 | * entries by removing the entries now in the linear buffer |
2225 | * and shifting the remaining entries. For now we do not try |
2226 | * to copy partial entries to avoid complexity of running out |
2227 | * of sg_entry slots. The downside is reading a single byte |
2228 | * will copy the entire sg entry. |
2229 | */ |
2230 | do { |
2231 | copy += sk_msg_elem(msg, i)->length; |
2232 | sk_msg_iter_var_next(i); |
2233 | if (bytes_sg_total <= copy) |
2234 | break; |
2235 | } while (i != msg->sg.end); |
2236 | last_sge = i; |
2237 | |
2238 | if (unlikely(bytes_sg_total > copy)) |
2239 | return -EINVAL; |
2240 | |
2241 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, |
2242 | get_order(copy)); |
2243 | if (unlikely(!page)) |
2244 | return -ENOMEM; |
2245 | |
2246 | raw = page_address(page); |
2247 | i = first_sge; |
2248 | do { |
2249 | sge = sk_msg_elem(msg, i); |
2250 | from = sg_virt(sge); |
2251 | len = sge->length; |
2252 | to = raw + poffset; |
2253 | |
2254 | memcpy(to, from, len); |
2255 | poffset += len; |
2256 | sge->length = 0; |
2257 | put_page(sg_page(sge)); |
2258 | |
2259 | sk_msg_iter_var_next(i); |
2260 | } while (i != last_sge); |
2261 | |
2262 | sg_set_page(&msg->sg.data[first_sge], page, copy, 0); |
2263 | |
2264 | /* To repair sg ring we need to shift entries. If we only |
2265 | * had a single entry though we can just replace it and |
2266 | * be done. Otherwise walk the ring and shift the entries. |
2267 | */ |
2268 | WARN_ON_ONCE(last_sge == first_sge); |
2269 | shift = last_sge > first_sge ? |
2270 | last_sge - first_sge - 1 : |
2271 | MAX_SKB_FRAGS - first_sge + last_sge - 1; |
2272 | if (!shift) |
2273 | goto out; |
2274 | |
2275 | i = first_sge; |
2276 | sk_msg_iter_var_next(i); |
2277 | do { |
2278 | u32 move_from; |
2279 | |
2280 | if (i + shift >= MAX_MSG_FRAGS) |
2281 | move_from = i + shift - MAX_MSG_FRAGS; |
2282 | else |
2283 | move_from = i + shift; |
2284 | if (move_from == msg->sg.end) |
2285 | break; |
2286 | |
2287 | msg->sg.data[i] = msg->sg.data[move_from]; |
2288 | msg->sg.data[move_from].length = 0; |
2289 | msg->sg.data[move_from].page_link = 0; |
2290 | msg->sg.data[move_from].offset = 0; |
2291 | sk_msg_iter_var_next(i); |
2292 | } while (1); |
2293 | |
2294 | msg->sg.end = msg->sg.end - shift > msg->sg.end ? |
2295 | msg->sg.end - shift + MAX_MSG_FRAGS : |
2296 | msg->sg.end - shift; |
2297 | out: |
2298 | msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; |
2299 | msg->data_end = msg->data + bytes; |
2300 | return 0; |
2301 | } |
2302 | |
2303 | static const struct bpf_func_proto bpf_msg_pull_data_proto = { |
2304 | .func = bpf_msg_pull_data, |
2305 | .gpl_only = false, |
2306 | .ret_type = RET_INTEGER, |
2307 | .arg1_type = ARG_PTR_TO_CTX, |
2308 | .arg2_type = ARG_ANYTHING, |
2309 | .arg3_type = ARG_ANYTHING, |
2310 | .arg4_type = ARG_ANYTHING, |
2311 | }; |
2312 | |
2313 | BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, |
2314 | u32, len, u64, flags) |
2315 | { |
2316 | struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge; |
2317 | u32 new, i = 0, l, space, copy = 0, offset = 0; |
2318 | u8 *raw, *to, *from; |
2319 | struct page *page; |
2320 | |
2321 | if (unlikely(flags)) |
2322 | return -EINVAL; |
2323 | |
2324 | /* First find the starting scatterlist element */ |
2325 | i = msg->sg.start; |
2326 | do { |
2327 | l = sk_msg_elem(msg, i)->length; |
2328 | |
2329 | if (start < offset + l) |
2330 | break; |
2331 | offset += l; |
2332 | sk_msg_iter_var_next(i); |
2333 | } while (i != msg->sg.end); |
2334 | |
2335 | if (start >= offset + l) |
2336 | return -EINVAL; |
2337 | |
2338 | space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); |
2339 | |
	/* If no space is available we fall back to copying. We need at
	 * least one scatterlist elem available to push data into when
	 * start aligns to the beginning of an element, or two when it
	 * falls inside an element. We handle the start equals offset
	 * case because it is the common case for inserting a header.
	 */
2347 | if (!space || (space == 1 && start != offset)) |
2348 | copy = msg->sg.data[i].length; |
2349 | |
2350 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, |
2351 | get_order(copy + len)); |
2352 | if (unlikely(!page)) |
2353 | return -ENOMEM; |
2354 | |
2355 | if (copy) { |
2356 | int front, back; |
2357 | |
2358 | raw = page_address(page); |
2359 | |
2360 | psge = sk_msg_elem(msg, i); |
2361 | front = start - offset; |
2362 | back = psge->length - front; |
2363 | from = sg_virt(psge); |
2364 | |
2365 | if (front) |
2366 | memcpy(raw, from, front); |
2367 | |
2368 | if (back) { |
2369 | from += front; |
2370 | to = raw + front + len; |
2371 | |
2372 | memcpy(to, from, back); |
2373 | } |
2374 | |
2375 | put_page(sg_page(psge)); |
2376 | } else if (start - offset) { |
2377 | psge = sk_msg_elem(msg, i); |
2378 | rsge = sk_msg_elem_cpy(msg, i); |
2379 | |
2380 | psge->length = start - offset; |
2381 | rsge.length -= psge->length; |
		rsge.offset += start - offset;
2383 | |
2384 | sk_msg_iter_var_next(i); |
2385 | sg_unmark_end(psge); |
2386 | sk_msg_iter_next(msg, end); |
2387 | } |
2388 | |
2389 | /* Slot(s) to place newly allocated data */ |
2390 | new = i; |
2391 | |
2392 | /* Shift one or two slots as needed */ |
2393 | if (!copy) { |
2394 | sge = sk_msg_elem_cpy(msg, i); |
2395 | |
2396 | sk_msg_iter_var_next(i); |
2397 | sg_unmark_end(&sge); |
2398 | sk_msg_iter_next(msg, end); |
2399 | |
2400 | nsge = sk_msg_elem_cpy(msg, i); |
2401 | if (rsge.length) { |
2402 | sk_msg_iter_var_next(i); |
2403 | nnsge = sk_msg_elem_cpy(msg, i); |
2404 | } |
2405 | |
2406 | while (i != msg->sg.end) { |
2407 | msg->sg.data[i] = sge; |
2408 | sge = nsge; |
2409 | sk_msg_iter_var_next(i); |
2410 | if (rsge.length) { |
2411 | nsge = nnsge; |
2412 | nnsge = sk_msg_elem_cpy(msg, i); |
2413 | } else { |
2414 | nsge = sk_msg_elem_cpy(msg, i); |
2415 | } |
2416 | } |
2417 | } |
2418 | |
2419 | /* Place newly allocated data buffer */ |
2420 | sk_mem_charge(msg->sk, len); |
2421 | msg->sg.size += len; |
2422 | msg->sg.copy[new] = false; |
2423 | sg_set_page(&msg->sg.data[new], page, len + copy, 0); |
2424 | if (rsge.length) { |
2425 | get_page(sg_page(&rsge)); |
2426 | sk_msg_iter_var_next(new); |
2427 | msg->sg.data[new] = rsge; |
2428 | } |
2429 | |
2430 | sk_msg_compute_data_pointers(msg); |
2431 | return 0; |
2432 | } |
2433 | |
2434 | static const struct bpf_func_proto bpf_msg_push_data_proto = { |
2435 | .func = bpf_msg_push_data, |
2436 | .gpl_only = false, |
2437 | .ret_type = RET_INTEGER, |
2438 | .arg1_type = ARG_PTR_TO_CTX, |
2439 | .arg2_type = ARG_ANYTHING, |
2440 | .arg3_type = ARG_ANYTHING, |
2441 | .arg4_type = ARG_ANYTHING, |
2442 | }; |
2443 | |
2444 | static void sk_msg_shift_left(struct sk_msg *msg, int i) |
2445 | { |
2446 | int prev; |
2447 | |
2448 | do { |
2449 | prev = i; |
2450 | sk_msg_iter_var_next(i); |
2451 | msg->sg.data[prev] = msg->sg.data[i]; |
2452 | } while (i != msg->sg.end); |
2453 | |
2454 | sk_msg_iter_prev(msg, end); |
2455 | } |
2456 | |
2457 | static void sk_msg_shift_right(struct sk_msg *msg, int i) |
2458 | { |
2459 | struct scatterlist tmp, sge; |
2460 | |
2461 | sk_msg_iter_next(msg, end); |
2462 | sge = sk_msg_elem_cpy(msg, i); |
2463 | sk_msg_iter_var_next(i); |
2464 | tmp = sk_msg_elem_cpy(msg, i); |
2465 | |
2466 | while (i != msg->sg.end) { |
2467 | msg->sg.data[i] = sge; |
2468 | sk_msg_iter_var_next(i); |
2469 | sge = tmp; |
2470 | tmp = sk_msg_elem_cpy(msg, i); |
2471 | } |
2472 | } |
2473 | |
2474 | BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, |
2475 | u32, len, u64, flags) |
2476 | { |
2477 | u32 i = 0, l, space, offset = 0; |
2478 | u64 last = start + len; |
2479 | int pop; |
2480 | |
2481 | if (unlikely(flags)) |
2482 | return -EINVAL; |
2483 | |
2484 | /* First find the starting scatterlist element */ |
2485 | i = msg->sg.start; |
2486 | do { |
2487 | l = sk_msg_elem(msg, i)->length; |
2488 | |
2489 | if (start < offset + l) |
2490 | break; |
2491 | offset += l; |
2492 | sk_msg_iter_var_next(i); |
2493 | } while (i != msg->sg.end); |
2494 | |
2495 | /* Bounds checks: start and pop must be inside message */ |
2496 | if (start >= offset + l || last >= msg->sg.size) |
2497 | return -EINVAL; |
2498 | |
2499 | space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); |
2500 | |
2501 | pop = len; |
2502 | /* --------------| offset |
2503 | * -| start |-------- len -------| |
2504 | * |
2505 | * |----- a ----|-------- pop -------|----- b ----| |
2506 | * |______________________________________________| length |
2507 | * |
2508 | * |
	 * a: region at the front of the scatter element to save
	 * b: region at the back of the scatter element to save when
	 *    length > a + pop
	 * pop: region to pop from the element; same as the input 'pop',
	 *      it is decremented below per iteration.
	 *
	 * Two top-level cases to handle when start != offset: first b is
	 * non-zero, and second b is zero, corresponding to a pop that
	 * spans more than one element.
	 *
	 * Then, if b is non-zero and there is no space, allocate space and
	 * compact the a and b regions into a page. If there is space, shift
	 * the ring to the right, freeing the next element in the ring to
	 * place b, leaving a untouched except to reduce its length.
2522 | */ |
2523 | if (start != offset) { |
2524 | struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); |
		int a = start - offset;
2526 | int b = sge->length - pop - a; |
2527 | |
2528 | sk_msg_iter_var_next(i); |
2529 | |
2530 | if (pop < sge->length - a) { |
2531 | if (space) { |
2532 | sge->length = a; |
2533 | sk_msg_shift_right(msg, i); |
2534 | nsge = sk_msg_elem(msg, i); |
2535 | get_page(sg_page(sge)); |
2536 | sg_set_page(nsge, |
2537 | sg_page(sge), |
2538 | b, sge->offset + pop + a); |
2539 | } else { |
2540 | struct page *page, *orig; |
2541 | u8 *to, *from; |
2542 | |
2543 | page = alloc_pages(__GFP_NOWARN | |
2544 | __GFP_COMP | GFP_ATOMIC, |
2545 | get_order(a + b)); |
2546 | if (unlikely(!page)) |
2547 | return -ENOMEM; |
2548 | |
2549 | sge->length = a; |
2550 | orig = sg_page(sge); |
2551 | from = sg_virt(sge); |
2552 | to = page_address(page); |
2553 | memcpy(to, from, a); |
2554 | memcpy(to + a, from + a + pop, b); |
2555 | sg_set_page(sge, page, a + b, 0); |
2556 | put_page(orig); |
2557 | } |
2558 | pop = 0; |
2559 | } else if (pop >= sge->length - a) { |
			pop -= (sge->length - a);
			sge->length = a;
2562 | } |
2563 | } |
2564 | |
2565 | /* From above the current layout _must_ be as follows, |
2566 | * |
2567 | * -| offset |
2568 | * -| start |
2569 | * |
2570 | * |---- pop ---|---------------- b ------------| |
2571 | * |____________________________________________| length |
2572 | * |
2573 | * Offset and start of the current msg elem are equal because in the |
2574 | * previous case we handled offset != start and either consumed the |
2575 | * entire element and advanced to the next element OR pop == 0. |
2576 | * |
	 * Two cases to handle here: first, pop is less than the length,
	 * leaving some remainder b above. Simply adjust the element's
	 * layout in this case. Or, pop >= length of the element, so that
	 * b = 0. In this case advance to the next element, decrementing
	 * pop.
2581 | */ |
2582 | while (pop) { |
2583 | struct scatterlist *sge = sk_msg_elem(msg, i); |
2584 | |
2585 | if (pop < sge->length) { |
2586 | sge->length -= pop; |
2587 | sge->offset += pop; |
2588 | pop = 0; |
2589 | } else { |
2590 | pop -= sge->length; |
2591 | sk_msg_shift_left(msg, i); |
2592 | } |
2593 | sk_msg_iter_var_next(i); |
2594 | } |
2595 | |
2596 | sk_mem_uncharge(msg->sk, len - pop); |
2597 | msg->sg.size -= (len - pop); |
2598 | sk_msg_compute_data_pointers(msg); |
2599 | return 0; |
2600 | } |
2601 | |
2602 | static const struct bpf_func_proto bpf_msg_pop_data_proto = { |
2603 | .func = bpf_msg_pop_data, |
2604 | .gpl_only = false, |
2605 | .ret_type = RET_INTEGER, |
2606 | .arg1_type = ARG_PTR_TO_CTX, |
2607 | .arg2_type = ARG_ANYTHING, |
2608 | .arg3_type = ARG_ANYTHING, |
2609 | .arg4_type = ARG_ANYTHING, |
2610 | }; |
2611 | |
2612 | BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) |
2613 | { |
2614 | return task_get_classid(skb); |
2615 | } |
2616 | |
2617 | static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { |
2618 | .func = bpf_get_cgroup_classid, |
2619 | .gpl_only = false, |
2620 | .ret_type = RET_INTEGER, |
2621 | .arg1_type = ARG_PTR_TO_CTX, |
2622 | }; |
2623 | |
2624 | BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) |
2625 | { |
2626 | return dst_tclassid(skb); |
2627 | } |
2628 | |
2629 | static const struct bpf_func_proto bpf_get_route_realm_proto = { |
2630 | .func = bpf_get_route_realm, |
2631 | .gpl_only = false, |
2632 | .ret_type = RET_INTEGER, |
2633 | .arg1_type = ARG_PTR_TO_CTX, |
2634 | }; |
2635 | |
2636 | BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) |
2637 | { |
2638 | /* If skb_clear_hash() was called due to mangling, we can |
2639 | * trigger SW recalculation here. Later access to hash |
2640 | * can then use the inline skb->hash via context directly |
2641 | * instead of calling this helper again. |
2642 | */ |
2643 | return skb_get_hash(skb); |
2644 | } |
2645 | |
2646 | static const struct bpf_func_proto bpf_get_hash_recalc_proto = { |
2647 | .func = bpf_get_hash_recalc, |
2648 | .gpl_only = false, |
2649 | .ret_type = RET_INTEGER, |
2650 | .arg1_type = ARG_PTR_TO_CTX, |
2651 | }; |
2652 | |
2653 | BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) |
2654 | { |
2655 | /* After all direct packet write, this can be used once for |
2656 | * triggering a lazy recalc on next skb_get_hash() invocation. |
2657 | */ |
2658 | skb_clear_hash(skb); |
2659 | return 0; |
2660 | } |
2661 | |
2662 | static const struct bpf_func_proto bpf_set_hash_invalid_proto = { |
2663 | .func = bpf_set_hash_invalid, |
2664 | .gpl_only = false, |
2665 | .ret_type = RET_INTEGER, |
2666 | .arg1_type = ARG_PTR_TO_CTX, |
2667 | }; |
2668 | |
2669 | BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) |
2670 | { |
2671 | /* Set user specified hash as L4(+), so that it gets returned |
2672 | * on skb_get_hash() call unless BPF prog later on triggers a |
2673 | * skb_clear_hash(). |
2674 | */ |
2675 | __skb_set_sw_hash(skb, hash, true); |
2676 | return 0; |
2677 | } |
2678 | |
2679 | static const struct bpf_func_proto bpf_set_hash_proto = { |
2680 | .func = bpf_set_hash, |
2681 | .gpl_only = false, |
2682 | .ret_type = RET_INTEGER, |
2683 | .arg1_type = ARG_PTR_TO_CTX, |
2684 | .arg2_type = ARG_ANYTHING, |
2685 | }; |
2686 | |
2687 | BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, |
2688 | u16, vlan_tci) |
2689 | { |
2690 | int ret; |
2691 | |
2692 | if (unlikely(vlan_proto != htons(ETH_P_8021Q) && |
2693 | vlan_proto != htons(ETH_P_8021AD))) |
2694 | vlan_proto = htons(ETH_P_8021Q); |
2695 | |
2696 | bpf_push_mac_rcsum(skb); |
2697 | ret = skb_vlan_push(skb, vlan_proto, vlan_tci); |
2698 | bpf_pull_mac_rcsum(skb); |
2699 | |
2700 | bpf_compute_data_pointers(skb); |
2701 | return ret; |
2702 | } |
2703 | |
2704 | static const struct bpf_func_proto bpf_skb_vlan_push_proto = { |
2705 | .func = bpf_skb_vlan_push, |
2706 | .gpl_only = false, |
2707 | .ret_type = RET_INTEGER, |
2708 | .arg1_type = ARG_PTR_TO_CTX, |
2709 | .arg2_type = ARG_ANYTHING, |
2710 | .arg3_type = ARG_ANYTHING, |
2711 | }; |
2712 | |
2713 | BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) |
2714 | { |
2715 | int ret; |
2716 | |
2717 | bpf_push_mac_rcsum(skb); |
2718 | ret = skb_vlan_pop(skb); |
2719 | bpf_pull_mac_rcsum(skb); |
2720 | |
2721 | bpf_compute_data_pointers(skb); |
2722 | return ret; |
2723 | } |
2724 | |
2725 | static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { |
2726 | .func = bpf_skb_vlan_pop, |
2727 | .gpl_only = false, |
2728 | .ret_type = RET_INTEGER, |
2729 | .arg1_type = ARG_PTR_TO_CTX, |
2730 | }; |
2731 | |
2732 | static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) |
2733 | { |
2734 | /* Caller already did skb_cow() with len as headroom, |
2735 | * so no need to do it here. |
2736 | */ |
2737 | skb_push(skb, len); |
2738 | memmove(skb->data, skb->data + len, off); |
2739 | memset(skb->data + off, 0, len); |
2740 | |
2741 | /* No skb_postpush_rcsum(skb, skb->data + off, len) |
2742 | * needed here as it does not change the skb->csum |
2743 | * result for checksum complete when summing over |
2744 | * zeroed blocks. |
2745 | */ |
2746 | return 0; |
2747 | } |
2748 | |
2749 | static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) |
2750 | { |
2751 | /* skb_ensure_writable() is not needed here, as we're |
2752 | * already working on an uncloned skb. |
2753 | */ |
2754 | if (unlikely(!pskb_may_pull(skb, off + len))) |
2755 | return -ENOMEM; |
2756 | |
2757 | skb_postpull_rcsum(skb, skb->data + off, len); |
2758 | memmove(skb->data + len, skb->data, off); |
2759 | __skb_pull(skb, len); |
2760 | |
2761 | return 0; |
2762 | } |
2763 | |
2764 | static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) |
2765 | { |
2766 | bool trans_same = skb->transport_header == skb->network_header; |
2767 | int ret; |
2768 | |
2769 | /* There's no need for __skb_push()/__skb_pull() pair to |
2770 | * get to the start of the mac header as we're guaranteed |
2771 | * to always start from here under eBPF. |
2772 | */ |
2773 | ret = bpf_skb_generic_push(skb, off, len); |
2774 | if (likely(!ret)) { |
2775 | skb->mac_header -= len; |
2776 | skb->network_header -= len; |
2777 | if (trans_same) |
2778 | skb->transport_header = skb->network_header; |
2779 | } |
2780 | |
2781 | return ret; |
2782 | } |
2783 | |
2784 | static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) |
2785 | { |
2786 | bool trans_same = skb->transport_header == skb->network_header; |
2787 | int ret; |
2788 | |
2789 | /* Same here, __skb_push()/__skb_pull() pair not needed. */ |
2790 | ret = bpf_skb_generic_pop(skb, off, len); |
2791 | if (likely(!ret)) { |
2792 | skb->mac_header += len; |
2793 | skb->network_header += len; |
2794 | if (trans_same) |
2795 | skb->transport_header = skb->network_header; |
2796 | } |
2797 | |
2798 | return ret; |
2799 | } |
2800 | |
2801 | static int bpf_skb_proto_4_to_6(struct sk_buff *skb) |
2802 | { |
2803 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); |
2804 | u32 off = skb_mac_header_len(skb); |
2805 | int ret; |
2806 | |
2807 | if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) |
2808 | return -ENOTSUPP; |
2809 | |
2810 | ret = skb_cow(skb, len_diff); |
2811 | if (unlikely(ret < 0)) |
2812 | return ret; |
2813 | |
2814 | ret = bpf_skb_net_hdr_push(skb, off, len_diff); |
2815 | if (unlikely(ret < 0)) |
2816 | return ret; |
2817 | |
2818 | if (skb_is_gso(skb)) { |
2819 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2820 | |
2821 | /* SKB_GSO_TCPV4 needs to be changed into |
2822 | * SKB_GSO_TCPV6. |
2823 | */ |
2824 | if (shinfo->gso_type & SKB_GSO_TCPV4) { |
2825 | shinfo->gso_type &= ~SKB_GSO_TCPV4; |
2826 | shinfo->gso_type |= SKB_GSO_TCPV6; |
2827 | } |
2828 | |
2829 | /* Due to IPv6 header, MSS needs to be downgraded. */ |
2830 | skb_decrease_gso_size(shinfo, len_diff); |
2831 | /* Header must be checked, and gso_segs recomputed. */ |
2832 | shinfo->gso_type |= SKB_GSO_DODGY; |
2833 | shinfo->gso_segs = 0; |
2834 | } |
2835 | |
2836 | skb->protocol = htons(ETH_P_IPV6); |
2837 | skb_clear_hash(skb); |
2838 | |
2839 | return 0; |
2840 | } |
2841 | |
2842 | static int bpf_skb_proto_6_to_4(struct sk_buff *skb) |
2843 | { |
2844 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); |
2845 | u32 off = skb_mac_header_len(skb); |
2846 | int ret; |
2847 | |
2848 | if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) |
2849 | return -ENOTSUPP; |
2850 | |
2851 | ret = skb_unclone(skb, GFP_ATOMIC); |
2852 | if (unlikely(ret < 0)) |
2853 | return ret; |
2854 | |
2855 | ret = bpf_skb_net_hdr_pop(skb, off, len_diff); |
2856 | if (unlikely(ret < 0)) |
2857 | return ret; |
2858 | |
2859 | if (skb_is_gso(skb)) { |
2860 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2861 | |
2862 | /* SKB_GSO_TCPV6 needs to be changed into |
2863 | * SKB_GSO_TCPV4. |
2864 | */ |
2865 | if (shinfo->gso_type & SKB_GSO_TCPV6) { |
2866 | shinfo->gso_type &= ~SKB_GSO_TCPV6; |
2867 | shinfo->gso_type |= SKB_GSO_TCPV4; |
2868 | } |
2869 | |
2870 | /* Due to IPv4 header, MSS can be upgraded. */ |
2871 | skb_increase_gso_size(shinfo, len_diff); |
2872 | /* Header must be checked, and gso_segs recomputed. */ |
2873 | shinfo->gso_type |= SKB_GSO_DODGY; |
2874 | shinfo->gso_segs = 0; |
2875 | } |
2876 | |
2877 | skb->protocol = htons(ETH_P_IP); |
2878 | skb_clear_hash(skb); |
2879 | |
2880 | return 0; |
2881 | } |
2882 | |
2883 | static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) |
2884 | { |
2885 | __be16 from_proto = skb->protocol; |
2886 | |
2887 | if (from_proto == htons(ETH_P_IP) && |
2888 | to_proto == htons(ETH_P_IPV6)) |
2889 | return bpf_skb_proto_4_to_6(skb); |
2890 | |
2891 | if (from_proto == htons(ETH_P_IPV6) && |
2892 | to_proto == htons(ETH_P_IP)) |
2893 | return bpf_skb_proto_6_to_4(skb); |
2894 | |
2895 | return -ENOTSUPP; |
2896 | } |
2897 | |
2898 | BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, |
2899 | u64, flags) |
2900 | { |
2901 | int ret; |
2902 | |
2903 | if (unlikely(flags)) |
2904 | return -EINVAL; |
2905 | |
2906 | /* General idea is that this helper does the basic groundwork |
2907 | * needed for changing the protocol, and eBPF program fills the |
2908 | * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() |
2909 | * and other helpers, rather than passing a raw buffer here. |
2910 | * |
2911 | * The rationale is to keep this minimal and without a need to |
	 * deal with raw packet data. E.g. even if we passed buffers
	 * here, the program would still need to call the
	 * bpf_lX_csum_replace() helpers anyway. Plus, this way we also
	 * keep separation of concerns, since e.g. bpf_skb_store_bytes()
	 * should only take care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but the flags argument is reserved so we can
	 * adapt that later. For offloads, we mark the packet as dodgy,
	 * so that headers need to be verified first.
2922 | */ |
2923 | ret = bpf_skb_proto_xlat(skb, proto); |
2924 | bpf_compute_data_pointers(skb); |
2925 | return ret; |
2926 | } |
2927 | |
2928 | static const struct bpf_func_proto bpf_skb_change_proto_proto = { |
2929 | .func = bpf_skb_change_proto, |
2930 | .gpl_only = false, |
2931 | .ret_type = RET_INTEGER, |
2932 | .arg1_type = ARG_PTR_TO_CTX, |
2933 | .arg2_type = ARG_ANYTHING, |
2934 | .arg3_type = ARG_ANYTHING, |
2935 | }; |
2936 | |
2937 | BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) |
2938 | { |
2939 | /* We only allow a restricted subset to be changed for now. */ |
2940 | if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || |
2941 | !skb_pkt_type_ok(pkt_type))) |
2942 | return -EINVAL; |
2943 | |
2944 | skb->pkt_type = pkt_type; |
2945 | return 0; |
2946 | } |
2947 | |
2948 | static const struct bpf_func_proto bpf_skb_change_type_proto = { |
2949 | .func = bpf_skb_change_type, |
2950 | .gpl_only = false, |
2951 | .ret_type = RET_INTEGER, |
2952 | .arg1_type = ARG_PTR_TO_CTX, |
2953 | .arg2_type = ARG_ANYTHING, |
2954 | }; |
2955 | |
2956 | static u32 bpf_skb_net_base_len(const struct sk_buff *skb) |
2957 | { |
2958 | switch (skb->protocol) { |
2959 | case htons(ETH_P_IP): |
2960 | return sizeof(struct iphdr); |
2961 | case htons(ETH_P_IPV6): |
2962 | return sizeof(struct ipv6hdr); |
2963 | default: |
2964 | return ~0U; |
2965 | } |
2966 | } |
2967 | |
2968 | static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) |
2969 | { |
2970 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2971 | int ret; |
2972 | |
2973 | if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) |
2974 | return -ENOTSUPP; |
2975 | |
2976 | ret = skb_cow(skb, len_diff); |
2977 | if (unlikely(ret < 0)) |
2978 | return ret; |
2979 | |
2980 | ret = bpf_skb_net_hdr_push(skb, off, len_diff); |
2981 | if (unlikely(ret < 0)) |
2982 | return ret; |
2983 | |
2984 | if (skb_is_gso(skb)) { |
2985 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2986 | |
2987 | /* Due to header grow, MSS needs to be downgraded. */ |
2988 | skb_decrease_gso_size(shinfo, len_diff); |
2989 | /* Header must be checked, and gso_segs recomputed. */ |
2990 | shinfo->gso_type |= SKB_GSO_DODGY; |
2991 | shinfo->gso_segs = 0; |
2992 | } |
2993 | |
2994 | return 0; |
2995 | } |
2996 | |
2997 | static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) |
2998 | { |
2999 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
3000 | int ret; |
3001 | |
3002 | if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) |
3003 | return -ENOTSUPP; |
3004 | |
3005 | ret = skb_unclone(skb, GFP_ATOMIC); |
3006 | if (unlikely(ret < 0)) |
3007 | return ret; |
3008 | |
3009 | ret = bpf_skb_net_hdr_pop(skb, off, len_diff); |
3010 | if (unlikely(ret < 0)) |
3011 | return ret; |
3012 | |
3013 | if (skb_is_gso(skb)) { |
3014 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
3015 | |
3016 | /* Due to header shrink, MSS can be upgraded. */ |
3017 | skb_increase_gso_size(shinfo, len_diff); |
3018 | /* Header must be checked, and gso_segs recomputed. */ |
3019 | shinfo->gso_type |= SKB_GSO_DODGY; |
3020 | shinfo->gso_segs = 0; |
3021 | } |
3022 | |
3023 | return 0; |
3024 | } |
3025 | |
3026 | static u32 __bpf_skb_max_len(const struct sk_buff *skb) |
3027 | { |
3028 | return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : |
3029 | SKB_MAX_ALLOC; |
3030 | } |
3031 | |
3032 | static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) |
3033 | { |
3034 | bool trans_same = skb->transport_header == skb->network_header; |
3035 | u32 len_cur, len_diff_abs = abs(len_diff); |
3036 | u32 len_min = bpf_skb_net_base_len(skb); |
3037 | u32 len_max = __bpf_skb_max_len(skb); |
3038 | __be16 proto = skb->protocol; |
3039 | bool shrink = len_diff < 0; |
3040 | int ret; |
3041 | |
3042 | if (unlikely(len_diff_abs > 0xfffU)) |
3043 | return -EFAULT; |
3044 | if (unlikely(proto != htons(ETH_P_IP) && |
3045 | proto != htons(ETH_P_IPV6))) |
3046 | return -ENOTSUPP; |
3047 | |
3048 | len_cur = skb->len - skb_network_offset(skb); |
3049 | if (skb_transport_header_was_set(skb) && !trans_same) |
3050 | len_cur = skb_network_header_len(skb); |
3051 | if ((shrink && (len_diff_abs >= len_cur || |
3052 | len_cur - len_diff_abs < len_min)) || |
3053 | (!shrink && (skb->len + len_diff_abs > len_max && |
3054 | !skb_is_gso(skb)))) |
3055 | return -ENOTSUPP; |
3056 | |
3057 | ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) : |
3058 | bpf_skb_net_grow(skb, len_diff_abs); |
3059 | |
3060 | bpf_compute_data_pointers(skb); |
3061 | return ret; |
3062 | } |
3063 | |
3064 | BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, |
3065 | u32, mode, u64, flags) |
3066 | { |
3067 | if (unlikely(flags)) |
3068 | return -EINVAL; |
3069 | if (likely(mode == BPF_ADJ_ROOM_NET)) |
3070 | return bpf_skb_adjust_net(skb, len_diff); |
3071 | |
3072 | return -ENOTSUPP; |
3073 | } |
3074 | |
3075 | static const struct bpf_func_proto bpf_skb_adjust_room_proto = { |
3076 | .func = bpf_skb_adjust_room, |
3077 | .gpl_only = false, |
3078 | .ret_type = RET_INTEGER, |
3079 | .arg1_type = ARG_PTR_TO_CTX, |
3080 | .arg2_type = ARG_ANYTHING, |
3081 | .arg3_type = ARG_ANYTHING, |
3082 | .arg4_type = ARG_ANYTHING, |
3083 | }; |
3084 | |
3085 | static u32 __bpf_skb_min_len(const struct sk_buff *skb) |
3086 | { |
3087 | u32 min_len = skb_network_offset(skb); |
3088 | |
3089 | if (skb_transport_header_was_set(skb)) |
3090 | min_len = skb_transport_offset(skb); |
3091 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
3092 | min_len = skb_checksum_start_offset(skb) + |
3093 | skb->csum_offset + sizeof(__sum16); |
3094 | return min_len; |
3095 | } |
3096 | |
3097 | static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsig |
---|