/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

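/* Walk the segment list and transfer the software timestamp request
 * from the original GSO skb to the segment that contains the byte
 * numbered ts_seq, so the timestamp fires for the right segment.
 */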
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}

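/* IPv4 entry point for TCP GSO: check that the skb really is TCPv4
 * and has a complete TCP header, set up the pseudo-header checksum
 * if the stack has not already done so, then hand off to the
 * protocol-independent tcp_gso_segment().
 */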
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);
                /* Set up the checksum pseudo header; we normally expect
                 * the stack to have done this already.
                 */
                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}

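/* Split a TCP GSO packet into MSS-sized segments: fix up the sequence
 * number, flags and checksum of every segment, and keep TCP Small
 * Queues accounting attached to the last segment. Returns the segment
 * list, NULL when the skb may be passed on unsegmented
 * (NETIF_F_GSO_ROBUST), or an ERR_PTR on error.
 */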
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

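        /* Stash the ones' complement of the original length; the
         * checksum fixups below add it to th->check to cancel the old
         * pseudo-header length and account for each segment's new one.
         */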
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder, both
         * cases return a GSO skb. So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

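        /* All but the last segment carry exactly mss bytes, so they can
         * share the checksum precomputed above; FIN/PSH may only remain
         * on the final segment and CWR only on the first.
         */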
        while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* The following permits TCP Small Queues to work well with GSO:
         * the callback to the TCP stack will be invoked when the last
         * fragment is freed at TX completion, and not right now when
         * gso_skb is freed by the GSO engine.
         */
        if (copy_destructor) {
                int delta;

                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                delta = sum_truesize - gso_skb->truesize;
                /* In some pathological cases, delta can be negative, so
                 * we need to use either refcount_add() or
                 * refcount_sub_and_test() below.
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }

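        /* The final segment may be shorter than mss, so recompute its
         * checksum from the actual length of its headers and payload.
         */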
        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                                (__force u32)delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

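/* GRO receive for TCP: look for a held packet of the same flow on the
 * GRO list and try to coalesce this skb into it. Returns the packet
 * that must be flushed up the stack, or NULL to keep aggregating.
 */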
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }
        p = NULL;
        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        /* When we receive our second frame we can make a decision on
         * whether we continue this flow as an atomic flow with a fixed
         * ID or use an incrementing ID.
         */
        if (NAPI_GRO_CB(p)->flush_id != 1 ||
            NAPI_GRO_CB(p)->count != 1 ||
            !NAPI_GRO_CB(p)->is_atomic)
                flush |= NAPI_GRO_CB(p)->flush_id;
        else
                NAPI_GRO_CB(p)->is_atomic = false;

        mss = skb_shinfo(p)->gso_size;

        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
        flush |= p->decrypted ^ skb->decrypted;
#endif

        if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }

        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = p;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

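/* Finish a coalesced GRO packet before it is passed up the stack:
 * point csum_start/csum_offset at the TCP checksum, record how many
 * segments were merged in gso_segs, and preserve ECN state.
 */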
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

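/* IPv4 GRO entry point: validate the TCP checksum against the IPv4
 * pseudo header before attempting to coalesce.
 */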
static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

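/* IPv4 GRO completion: seed th->check with the pseudo-header checksum
 * for the merged length and mark the skb as TCPv4 GSO so it can be
 * resegmented later if need be.
 */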
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        if (NAPI_GRO_CB(skb)->is_atomic)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment    = tcp4_gso_segment,
                .gro_receive    = tcp4_gro_receive,
                .gro_complete   = tcp4_gro_complete,
        },
};

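/* Register the TCP offload callbacks with the inet layer at boot. */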
int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}