/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

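/* Propagate a software TX timestamp request from the original GSO skb to the
 * one segment that contains byte ts_seq: walk the segment list, advancing the
 * expected sequence number by mss per segment, and tag the first segment
 * whose payload covers ts_seq.
 */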
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

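/* IPv4 entry point for TCP segmentation offload: check the gso_type and make
 * sure a pseudo-header checksum is in place before handing the skb to the
 * protocol-independent tcp_gso_segment().
 */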
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);
		/* Set up the checksum pseudo header; we normally expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

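/* Protocol-independent TCP GSO: split one large TCP skb into MSS-sized
 * segments and fix up sequence numbers, flags and checksums in each of them.
 */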
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

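	/* Remember the ones'-complement of the original TCP length (header
	 * included); it is added to each segment's new length later to form
	 * the checksum delta for the changed pseudo-header length.
	 */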
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

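	/* Software segmentation is only reached when the device cannot take
	 * the skb as-is.  If adding NETIF_F_GSO_ROBUST is enough to make
	 * skb_gso_ok() pass, the packet came from an untrusted source (it was
	 * forwarded or injected rather than built by the local stack), so
	 * only gso_segs needs to be recomputed; returning NULL keeps it as a
	 * single GSO packet.
	 */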
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb, so update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

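	/* Every segment except the last carries exactly thlen + mss bytes of
	 * TCP header plus payload, so the checksum adjustment for the changed
	 * pseudo-header length can be computed once and reused for all of
	 * them.
	 */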
	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

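	/* Walk every segment but the last: clear FIN/PSH (they belong only on
	 * the final segment), install the precomputed checksum, and give each
	 * following segment its own sequence number.  CWR is cleared on all
	 * segments after the first.
	 */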
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last fragment
	 * is freed at TX completion, and not right now when gso_skb is freed
	 * by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

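	/* The last segment may be shorter than mss, so recompute its checksum
	 * delta from the payload actually left in it (linear TCP data plus
	 * paged data).
	 */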
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

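/* Core TCP GRO receive: look for an skb already held on the GRO list that
 * belongs to the same flow, and either append this segment to it or mark one
 * or both packets for flushing up the stack.
 */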
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

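		/* th->source and th->dest are adjacent 16-bit fields, so both
		 * ports are compared here with a single 32-bit load.
		 */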
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
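	/* Any difference in the TCP options between the two headers also
	 * prevents merging.
	 */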
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or whether
	 * we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

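	/* Only merge segments no larger than the flow's MSS, and only when
	 * the new data starts exactly where the already-merged data ends.
	 */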
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
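	/* A segment shorter than the MSS is likely the last of a train, and
	 * URG/PSH/RST/SYN/FIN require prompt delivery; in either case do not
	 * hold this skb for further merging, and complete the matching
	 * packet (pp) if there is one.
	 */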
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

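/* Finish a merged GRO packet: point checksum offload at the TCP header and
 * record the number of merged segments in gso_segs, so the packet can be
 * re-segmented later (for instance when forwarded to a device without TSO).
 */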
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

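	/* Seed th->check with the IPv4 pseudo-header checksum so that later
	 * checksum completion (CHECKSUM_PARTIAL) over the TCP header and
	 * payload yields a valid TCP checksum.
	 */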
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	= tcp4_gso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}