// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_wrapper.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
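	/* tcf_idr_check_alloc() returns 0 if no action existed at @index
	 * (the index is then reserved for us), a positive value if an
	 * existing action was found and a reference taken, or a
	 * negative errno.
	 */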
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

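	/* Swap in the new parameters under the action lock; readers in
	 * the datapath use RCU, so the old parameters are freed only
	 * after a grace period.
	 */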
	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is present and writable in the
 * specified sk_buff.  Return a pointer to the next layer if it is,
 * NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

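	/* Unlike ICMPv6, the ICMPv4 checksum does not cover a
	 * pseudo-header; it is computed over the ICMP message alone.
	 */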
	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

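	/* GSO packets get their checksum finalized when they are
	 * segmented, so leave them to the GSO path.
	 */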
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms.  Don't use
	 * udph->len as the payload length without checking the protocol
	 * first: UDP-Lite uses udph->len as the checksum coverage instead.
	 * Use iph->tot_len, i.e. ipl, for the real length.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

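	/* A zero checksum in plain UDP over IPv4 means the sender did
	 * not compute one (RFC 768), so only update mandatory UDP-Lite
	 * checksums or checksums that are actually present.
	 */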
	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

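		/* A computed checksum of zero must be sent as all ones,
		 * since zero on the wire means "no checksum".
		 */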
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms.  Don't use
	 * udph->len as the payload length without checking the protocol
	 * first: UDP-Lite uses udph->len as the checksum coverage instead.
	 * Use ip6h->payload_len + sizeof(*ip6h) ..., i.e. ipl, for the
	 * real length.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

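	/* UDP over IPv6 must always carry a checksum, so a computed
	 * zero is likewise folded to all ones.
	 */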
	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
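	/* SCTP uses CRC32c rather than the Internet checksum; it has
	 * now been computed in software, so clear any outstanding
	 * request for CRC offload.
	 */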
	skb_reset_csum_not_inet(skb);

	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

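	/* Non-initial fragments carry no transport header; treat them
	 * as protocol 0 so that no L4 case below matches.
	 */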
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

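/* Scan a hop-by-hop options header for a jumbogram length option,
 * which overrides the payload length taken from the fixed IPv6 header.
 */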
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

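	/* Walk the chain of IPv6 extension headers until a transport
	 * header is found.  Fragmented packets are left alone: only the
	 * first fragment carries the transport header.
	 */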
	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

TC_INDIRECT_SCOPE int tcf_csum_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
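	/* Unwrap VLAN headers to reach the network-layer protocol; any
	 * pulled headers are pushed back below before returning.
	 */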
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index = p->tcf_index,
		.refcnt = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.id = TCA_ID_CSUM,
	.owner = THIS_MODULE,
	.act = tcf_csum_act,
	.dump = tcf_csum_dump,
	.init = tcf_csum_init,
	.cleanup = tcf_csum_cleanup,
	.get_fill_size = tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_csum_ops.net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id = &act_csum_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);