// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>

#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

#define IP4(a, b, c, d) \
	bpf_htonl((((__u32)(a) & 0xffU) << 24) | \
		  (((__u32)(b) & 0xffU) << 16) | \
		  (((__u32)(c) & 0xffU) << 8) | \
		  (((__u32)(d) & 0xffU) << 0))
#define IP6(aaaa, bbbb, cccc, dddd) \
	{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
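/*
 * Note: both macros produce values in network byte order, e.g.
 * IP4(127, 0, 0, 1) expands to bpf_htonl(0x7f000001), and IP6() builds
 * an array of four big-endian 32-bit words.
 */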

/* Macros for least-significant byte and word accesses. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define LSE_INDEX(index, size) (index)
#else
#define LSE_INDEX(index, size) ((size) - (index) - 1)
#endif
#define LSB(value, index) \
	(((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))])
#define LSW(value, index) \
	(((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])
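/*
 * Example (illustrative): for __u32 v = 0x11223344, LSB(v, 0) == 0x44 and
 * LSW(v, 0) == 0x3344 on both little- and big-endian targets; index 0
 * always refers to the least-significant byte/word.
 */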

#define MAX_SOCKS 32

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_SOCKS);
	__type(key, __u32);
	__type(value, __u64);
} redir_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, int);
} run_map SEC(".maps");

enum {
	PROG1 = 0,
	PROG2,
};

enum {
	SERVER_A = 0,
	SERVER_B,
};

/* Addressable key/value constants for convenience */
static const int KEY_PROG1 = PROG1;
static const int KEY_PROG2 = PROG2;
static const int PROG_DONE = 1;

static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;

static const __u16 SRC_PORT = bpf_htons(8008);
static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);

static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
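/*
 * Byte-order note: SRC_PORT is kept in network byte order because the
 * sk_lookup context reports ctx->remote_port in network byte order, while
 * ctx->local_port (and hence DST_PORT) is in host byte order.
 */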

SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
	return SK_PASS;
}

SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
	return SK_DROP;
}

SEC("sk_lookup")
int check_ifindex(struct bpf_sk_lookup *ctx)
{
	if (ctx->ingress_ifindex == 1)
		return SK_DROP;
	return SK_PASS;
}

SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
	return SK_PASS;
}

SEC("sk_reuseport")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
	return SK_DROP;
}

/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->local_port != DST_PORT)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}
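/*
 * Note on references: bpf_map_lookup_elem() on a SOCKMAP from an sk_lookup
 * program returns a referenced socket pointer, and the verifier requires
 * bpf_sk_release() on every path that obtained one, even after a successful
 * bpf_sk_assign().
 */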

/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->family != AF_INET)
		return SK_PASS;
	if (ctx->local_port != DST_PORT)
		return SK_PASS;
	if (ctx->local_ip4 != DST_IP4)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->family != AF_INET6)
		return SK_PASS;
	if (ctx->local_port != DST_PORT)
		return SK_PASS;
	if (ctx->local_ip6[0] != DST_IP6[0] ||
	    ctx->local_ip6[1] != DST_IP6[1] ||
	    ctx->local_ip6[2] != DST_IP6[2] ||
	    ctx->local_ip6[3] != DST_IP6[3])
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}
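/*
 * BPF_SK_LOOKUP_F_NO_REUSEPORT asks the kernel to deliver to the assigned
 * socket itself and to skip SO_REUSEPORT group selection (including any
 * attached sk_reuseport program) for that socket.
 */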

SEC("sk_reuseport")
int select_sock_b(struct sk_reuseport_md *ctx)
{
	__u32 key = KEY_SERVER_B;
	int err;

	err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0);
	return err ? SK_DROP : SK_PASS;
}
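/*
 * bpf_sk_select_reuseport() steers the incoming packet to the socket stored
 * under *key* in the given map; unlike bpf_sk_assign() it hands back no
 * socket reference, so there is nothing to release here.
 */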

/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err)
		goto out;
	bpf_sk_release(sk);

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err != -EEXIST) {
		bpf_printk("sk_assign returned %d, expected %d\n",
			   err, -EEXIST);
		goto out;
	}

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err)
		goto out;
	bpf_sk_release(sk);

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that bpf_sk_assign(sk=NULL) is accepted. */
SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk = NULL;
	int err, ret;

	ret = SK_DROP;

	err = bpf_sk_assign(ctx, NULL, 0);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	if (ctx->sk != sk)
		goto out;
	err = bpf_sk_assign(ctx, NULL, 0);
	if (err != -EEXIST)
		goto out;
	err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that selected sk is accessible through context. */
SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk1 = NULL, *sk2 = NULL;
	int err, ret;

	ret = SK_DROP;

	/* Try accessing unassigned (NULL) ctx->sk field */
	if (ctx->sk && ctx->sk->family != AF_INET)
		goto out;

	/* Assign a value to ctx->sk */
	sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk1)
		goto out;
	err = bpf_sk_assign(ctx, sk1, 0);
	if (err)
		goto out;
	if (ctx->sk != sk1)
		goto out;

	/* Access ctx->sk fields */
	if (ctx->sk->family != AF_INET ||
	    ctx->sk->type != SOCK_STREAM ||
	    ctx->sk->state != BPF_TCP_LISTEN)
		goto out;

	/* Reset selection */
	err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	if (ctx->sk)
		goto out;

	/* Assign another socket */
	sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk2)
		goto out;
	err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	if (ctx->sk != sk2)
		goto out;

	/* Access reassigned ctx->sk fields */
	if (ctx->sk->family != AF_INET ||
	    ctx->sk->type != SOCK_STREAM ||
	    ctx->sk->state != BPF_TCP_LISTEN)
		goto out;

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk1)
		bpf_sk_release(sk1);
	if (sk2)
		bpf_sk_release(sk2);
	return ret;
}

/* Check narrow loads from ctx fields that support them.
 *
 * Narrow loads of size >= target field size from a non-zero offset
 * are not covered because they give bogus results, that is, the
 * verifier ignores the offset.
 */
SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	__u32 val_u32;
	bool v4;

	v4 = (ctx->family == AF_INET);

	/* Narrow loads from family field */
	if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) ||
	    LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0)
		return SK_DROP;
	if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6))
		return SK_DROP;

	/* Narrow loads from protocol field */
	if (LSB(ctx->protocol, 0) != IPPROTO_TCP ||
	    LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0)
		return SK_DROP;
	if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
		return SK_DROP;

	/* Narrow loads from remote_port field. Expect SRC_PORT. */
	if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
	    LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
		return SK_DROP;
	if (LSW(ctx->remote_port, 0) != SRC_PORT)
		return SK_DROP;

	/*
	 * NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
	 * is quirky. It gets rewritten by the access converter to a
	 * 2-byte load for backward compatibility. Treating the load
	 * result as a be16 value makes the code portable across
	 * little- and big-endian platforms.
	 */
	val_u32 = *(__u32 *)&ctx->remote_port;
	if (val_u32 != SRC_PORT)
		return SK_DROP;

	/* Narrow loads from local_port field. Expect DST_PORT. */
	if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) ||
	    LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) ||
	    LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0)
		return SK_DROP;
	if (LSW(ctx->local_port, 0) != DST_PORT)
		return SK_DROP;

	/* Narrow loads from IPv4 fields */
	if (v4) {
		/* Expect SRC_IP4 in remote_ip4 */
		if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
		    LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
		    LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
		    LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
			return SK_DROP;
		if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
		    LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
			return SK_DROP;

		/* Expect DST_IP4 in local_ip4 */
		if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) ||
		    LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) ||
		    LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) ||
		    LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff))
			return SK_DROP;
		if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) ||
		    LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff))
			return SK_DROP;
	} else {
		/* Expect 0.0.0.0 IPs when family != AF_INET */
		if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 ||
		    LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0)
			return SK_DROP;
		if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0)
			return SK_DROP;

		if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 ||
		    LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0)
			return SK_DROP;
		if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0)
			return SK_DROP;
	}

	/* Narrow loads from IPv6 fields */
	if (!v4) {
		/* Expect SRC_IP6 in remote_ip6 */
		if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
		    LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
		    LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
		    LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
		    LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
		    LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
		    LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
		    LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
		    LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
		    LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
		    LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
		    LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
		    LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
		    LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
		    LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
		    LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
			return SK_DROP;
		if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
		    LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
		    LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
		    LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
		    LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
		    LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
		    LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
		    LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
			return SK_DROP;
		/* Expect DST_IP6 in local_ip6 */
		if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
		    LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) ||
		    LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) ||
		    LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) ||
		    LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) ||
		    LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) ||
		    LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) ||
		    LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) ||
		    LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) ||
		    LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) ||
		    LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) ||
		    LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) ||
		    LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) ||
		    LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) ||
		    LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) ||
		    LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff))
			return SK_DROP;
		if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) ||
		    LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) ||
		    LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) ||
		    LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) ||
		    LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) ||
		    LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) ||
		    LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) ||
		    LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff))
			return SK_DROP;
	} else {
		/* Expect :: IPs when family != AF_INET6 */
		if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 ||
		    LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 ||
		    LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 ||
		    LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 ||
		    LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 ||
		    LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 ||
		    LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 ||
		    LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0)
			return SK_DROP;
		if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
		    LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
		    LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
		    LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
			return SK_DROP;

		if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 ||
		    LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 ||
		    LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 ||
		    LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 ||
		    LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 ||
		    LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 ||
		    LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 ||
		    LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0)
			return SK_DROP;
		if (LSW(ctx->local_ip6[0], 0) != 0 || LSW(ctx->local_ip6[0], 1) != 0 ||
		    LSW(ctx->local_ip6[1], 0) != 0 || LSW(ctx->local_ip6[1], 1) != 0 ||
		    LSW(ctx->local_ip6[2], 0) != 0 || LSW(ctx->local_ip6[2], 1) != 0 ||
		    LSW(ctx->local_ip6[3], 0) != 0 || LSW(ctx->local_ip6[3], 1) != 0)
			return SK_DROP;
	}

	/* Success, redirect to KEY_SERVER_B */
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (sk) {
		bpf_sk_assign(ctx, sk, 0);
		bpf_sk_release(sk);
	}
	return SK_PASS;
}

/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */
SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;

	err = bpf_sk_assign(ctx, sk, 0);
	if (err != -ESOCKTNOSUPPORT) {
		bpf_printk("sk_assign returned %d, expected %d\n",
			   err, -ESOCKTNOSUPPORT);
		goto out;
	}

	ret = SK_PASS; /* Success, pass to regular lookup */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_DROP;
}

SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_DROP;
}

static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	if (err)
		return SK_DROP;

	return SK_PASS;
}

SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
	(void)select_server_a(ctx);
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
	(void)select_server_a(ctx);
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

char _license[] SEC("license") = "Dual BSD/GPL";
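
/*
 * Usage sketch (an assumption, not part of this object): from user space,
 * sk_lookup programs like the ones above are attached to a network
 * namespace via a bpf_link, e.g. with libbpf:
 *
 *	int netns_fd = open("/proc/self/ns/net", O_RDONLY);
 *	struct bpf_link *link =
 *		bpf_program__attach_netns(skel->progs.redir_port, netns_fd);
 *
 * The skeleton variable name ("skel") is hypothetical; the real harness for
 * this file lives in tools/testing/selftests/bpf/prog_tests/sk_lookup.c.
 */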

source code of linux/tools/testing/selftests/bpf/progs/test_sk_lookup.c