// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare

#include <string.h>

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <sys/socket.h>
#include <linux/tcp.h>

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

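/* Array slots used by the programs below:
 *   0 - cookie recovered from a valid SYN cookie ACK
 *   1 - SYN cookie generated for an incoming SYN
 *   2 - MSS value packed into that generated cookie
 */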
struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __type(key, __u32);
        __type(value, __u32);
        __uint(max_entries, 3);
} results SEC(".maps");

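/* Issue a SYN cookie for a pure SYN whose TCP header is exactly 24 bytes
 * (base header plus a lone MSS option). Returns the helper's packed result
 * (cookie in the low 32 bits, MSS above it), or 0 if no cookie was generated.
 */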
static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
                                           void *iph, __u32 ip_size,
                                           struct tcphdr *tcph)
{
        __u32 thlen = tcph->doff * 4;

        if (tcph->syn && !tcph->ack) {
                // packet should only have an MSS option
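                // (20-byte base header + 4-byte MSS option = 24 bytes)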
                if (thlen != 24)
                        return 0;

                if ((void *)tcph + thlen > data_end)
                        return 0;

                return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
        }
        return 0;
}

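/* Parse Ethernet/IPv4 or IPv6/TCP, look up the listening socket for the
 * 4-tuple, try to generate a SYN cookie for SYNs, validate the cookie echoed
 * back in ACKs, and record the results in the map above.
 */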
static __always_inline void check_syncookie(void *ctx, void *data,
                                            void *data_end)
{
        struct bpf_sock_tuple tup;
        struct bpf_sock *sk;
        struct ethhdr *ethh;
        struct iphdr *ipv4h;
        struct ipv6hdr *ipv6h;
        struct tcphdr *tcph;
        int ret;
        __u32 key_mss = 2;
        __u32 key_gen = 1;
        __u32 key = 0;
        __s64 seq_mss;

        ethh = data;
        if (ethh + 1 > data_end)
                return;

        switch (bpf_ntohs(ethh->h_proto)) {
        case ETH_P_IP:
                ipv4h = data + sizeof(struct ethhdr);
                if (ipv4h + 1 > data_end)
                        return;

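                /* only handle 20-byte IPv4 headers (no IP options) */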
                if (ipv4h->ihl != 5)
                        return;

                tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
                if (tcph + 1 > data_end)
                        return;

                tup.ipv4.saddr = ipv4h->saddr;
                tup.ipv4.daddr = ipv4h->daddr;
                tup.ipv4.sport = tcph->source;
                tup.ipv4.dport = tcph->dest;

                sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
                                        BPF_F_CURRENT_NETNS, 0);
                if (!sk)
                        return;

                if (sk->state != BPF_TCP_LISTEN)
                        goto release;

                seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
                                        tcph);

                ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
                                              tcph, sizeof(*tcph));
                break;

        case ETH_P_IPV6:
                ipv6h = data + sizeof(struct ethhdr);
                if (ipv6h + 1 > data_end)
                        return;

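                /* only handle TCP immediately following the fixed IPv6
                 * header (no extension headers)
                 */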
                if (ipv6h->nexthdr != IPPROTO_TCP)
                        return;

                tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
                if (tcph + 1 > data_end)
                        return;

                memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
                memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
                tup.ipv6.sport = tcph->source;
                tup.ipv6.dport = tcph->dest;

                sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
                                        BPF_F_CURRENT_NETNS, 0);
                if (!sk)
                        return;

                if (sk->state != BPF_TCP_LISTEN)
                        goto release;

                seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
                                        tcph);

                ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
                                              tcph, sizeof(*tcph));
                break;

        default:
                return;
        }

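        /* on success the helper packs the cookie into the low 32 bits and
         * the MSS into the bits above it
         */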
        if (seq_mss > 0) {
                __u32 cookie = (__u32)seq_mss;
                __u32 mss = seq_mss >> 32;

                bpf_map_update_elem(&results, &key_gen, &cookie, 0);
                bpf_map_update_elem(&results, &key_mss, &mss, 0);
        }

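        /* bpf_tcp_check_syncookie() returns 0 when the ACK carries a valid
         * SYN cookie; the cookie value is the ACK sequence number minus one
         */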
        if (ret == 0) {
                __u32 cookie = bpf_ntohl(tcph->ack_seq) - 1;

                bpf_map_update_elem(&results, &key, &cookie, 0);
        }

release:
        bpf_sk_release(sk);
}

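/* the same check is exposed as both a TC (clsact) and an XDP program */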
SEC("tc")
int check_syncookie_clsact(struct __sk_buff *skb)
{
        check_syncookie(skb, (void *)(long)skb->data,
                        (void *)(long)skb->data_end);
        return TC_ACT_OK;
}

SEC("xdp")
int check_syncookie_xdp(struct xdp_md *ctx)
{
        check_syncookie(ctx, (void *)(long)ctx->data,
                        (void *)(long)ctx->data_end);
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

// source: linux/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c