/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

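/*
 * Private data of the "tunnel" expression: which tunnel key to load
 * (path or ID), the destination register, the match mode (none/rx/tx)
 * and the length of the value stored in the register.
 */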
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;
	u8			dreg;
	enum nft_tunnel_mode	mode:8;
	u8			len;
};

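/*
 * Evaluate the tunnel expression. NFT_TUNNEL_PATH stores a boolean that
 * tells whether tunnel metadata matching the configured mode is attached
 * to the packet; NFT_TUNNEL_ID stores the 32-bit tunnel ID, or ends rule
 * evaluation with NFT_BREAK when no matching tunnel metadata is present.
 */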
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= NLA_POLICY_MAX(NLA_BE32, 255),
};

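/*
 * Parse the netlink attributes of the "tunnel" expression: the key
 * selects the stored length (one byte for PATH, four bytes for ID),
 * the mode defaults to NFT_TUNNEL_MODE_NONE, and the destination
 * register is validated via nft_parse_register_store().
 */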
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	priv->len = len;
	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

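/*
 * Register tracking: if the expression that last wrote this register is
 * an identical tunnel load (same key, register and mode) and no bitwise
 * operation has modified the register since, this load is redundant and
 * returning true allows it to be skipped.
 */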
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	const struct nft_tunnel *tunnel;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->key != tunnel->key ||
	    priv->dreg != tunnel->dreg ||
	    priv->mode != tunnel->mode) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return false;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
	.reduce		= nft_tunnel_get_reduce,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};

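/*
 * The "tunnel" object builds a pre-cooked metadata_dst describing an
 * encapsulation (addresses, ports, tunnel ID, flags and per-protocol
 * options such as VXLAN GBP, ERSPAN or GENEVE). Its eval hook attaches
 * that metadata to packets so a tunnel device can encapsulate them.
 */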
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];
	} u;
	u32	len;
	__be16	flags;
};

struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};

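/*
 * Parse the nested IPv4 (and, below, IPv6) endpoint attributes into the
 * ip_tunnel_info key. A destination address is mandatory; the source
 * address and the IPv6 flow label are optional.
 */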
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

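/*
 * VXLAN options: only the Group Based Policy (GBP) field is supported;
 * it is copied into the vxlan_metadata member of the options union.
 */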
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	opts->flags	= TUNNEL_VXLAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

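/*
 * ERSPAN options: version 1 carries a session index, version 2 carries
 * a direction bit and a hardware ID; other versions are rejected.
 */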
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	opts->flags	= TUNNEL_ERSPAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};

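/*
 * GENEVE options: unlike VXLAN and ERSPAN, several GENEVE TLVs may be
 * supplied; each one is appended as a struct geneve_opt header followed
 * by its 4-byte-aligned payload, as long as the total fits into
 * IP_TUNNEL_OPTS_MAX.
 */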
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	/* opts->len is a byte count: append after any options already stored */
	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
	int err, data_len;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
			       nft_tunnel_opts_geneve_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
		return -EINVAL;

	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	opts->len += sizeof(*opt) + data_len;
	if (opts->len > IP_TUNNEL_OPTS_MAX)
		return -EINVAL;

	memcpy(opt->opt_data, nla_data(attr), data_len);
	opt->length = data_len / 4;
	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
	opts->flags = TUNNEL_GENEVE_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};

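/*
 * Walk the nested NFTA_TUNNEL_KEY_OPTS attribute. VXLAN and ERSPAN
 * options are mutually exclusive and may appear only once; GENEVE
 * options may be repeated to append multiple TLVs.
 */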
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *nla;
	__be16 type = 0;
	int err, rem;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_VXLAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_ERSPAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_GENEVE_OPT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};

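/*
 * Create a tunnel object: a tunnel ID plus an IPv4 or IPv6 destination
 * is mandatory. The parsed key, flags, TOS/TTL, ports and options are
 * copied into a freshly allocated metadata_dst that the eval hook later
 * attaches to packets.
 */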
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode	= IP_TUNNEL_INFO_TX;
	info.key.tun_id	= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

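/*
 * Attach the pre-built tunnel metadata to the packet: drop any existing
 * dst, take a reference on the object's metadata_dst and set it as the
 * skb's dst so a subsequent tunnel device picks up the encapsulation.
 */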
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

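/*
 * Dump the protocol-specific options back to userspace, mirroring the
 * formats accepted by the init helpers above (VXLAN GBP, ERSPAN v1/v2,
 * or a sequence of GENEVE TLVs).
 */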
static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_GENEVE_OPT) {
		struct geneve_opt *opt;
		int offset = 0;

		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
		if (!inner)
			goto failure;
		while (opts->len > offset) {
			/* offset advances in bytes through u.data */
			opt = (struct geneve_opt *)(opts->u.data + offset);
			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
					 opt->opt_class) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
				       opt->type) ||
			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
				    opt->length * 4, opt->opt_data))
				goto inner_failure;
			offset += sizeof(*opt) + opt->length * 4;
		}
		nla_nest_end(skb, inner);
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

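/*
 * Dump the whole tunnel object: ID, endpoints, ports, flags, TOS/TTL
 * and options, in the same attribute layout that nft_tunnel_obj_init()
 * expects on input.
 */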
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");