// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

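/* Private state of the generic cmp expression: the constant operand,
 * the source register holding the runtime value, the operand length in
 * bytes and the comparison operator.
 */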
struct nft_cmp_expr {
        struct nft_data data;
        u8 sreg;
        u8 len;
        enum nft_cmp_ops op:8;
};

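/* Byte-wise comparison of the source register against the constant
 * operand. Operands are compared in the byte order supplied by
 * userspace; for big-endian (network order) data, the common case for
 * payload matches, memcmp() ordering matches numeric ordering, which
 * the LT/GT/LTE/GTE cases rely on. A failed comparison ends rule
 * evaluation with NFT_BREAK.
 */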
void nft_cmp_eval(const struct nft_expr *expr,
                  struct nft_regs *regs,
                  const struct nft_pktinfo *pkt)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);
        int d;

        d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
        switch (priv->op) {
        case NFT_CMP_EQ:
                if (d != 0)
                        goto mismatch;
                break;
        case NFT_CMP_NEQ:
                if (d == 0)
                        goto mismatch;
                break;
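        /* LT and GT first rule out equality, then fall through to
         * share the LTE/GTE bound check.
         */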
        case NFT_CMP_LT:
                if (d == 0)
                        goto mismatch;
                fallthrough;
        case NFT_CMP_LTE:
                if (d > 0)
                        goto mismatch;
                break;
        case NFT_CMP_GT:
                if (d == 0)
                        goto mismatch;
                fallthrough;
        case NFT_CMP_GTE:
                if (d < 0)
                        goto mismatch;
                break;
        }
        return;

mismatch:
        regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
        [NFTA_CMP_SREG] = { .type = NLA_U32 },
        [NFTA_CMP_OP] = { .type = NLA_U32 },
        [NFTA_CMP_DATA] = { .type = NLA_NESTED },
};

static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                        const struct nlattr * const tb[])
{
        struct nft_cmp_expr *priv = nft_expr_priv(expr);
        struct nft_data_desc desc = {
                .type = NFT_DATA_VALUE,
                .size = sizeof(priv->data),
        };
        int err;

        err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
        if (err < 0)
                return err;

        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
        if (err < 0)
                return err;

        priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
        priv->len = desc.len;
        return 0;
}

static int nft_cmp_dump(struct sk_buff *skb,
                        const struct nft_expr *expr, bool reset)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);

        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
                goto nla_put_failure;

        if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
                          NFT_DATA_VALUE, priv->len) < 0)
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

union nft_cmp_offload_data {
        u16 val16;
        u32 val32;
        u64 val64;
};

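/* Offload helper: convert a 2, 4 or 8 byte big-endian constant to host
 * byte order for offload registers flagged NFT_OFFLOAD_F_NETWORK2HOST.
 */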
static void nft_payload_n2h(union nft_cmp_offload_data *data,
                            const u8 *val, u32 len)
{
        switch (len) {
        case 2:
                data->val16 = ntohs(*((__be16 *)val));
                break;
        case 4:
                data->val32 = ntohl(*((__be32 *)val));
                break;
        case 8:
                data->val64 = be64_to_cpu(*((__be64 *)val));
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
}

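/* Translate a cmp expression into flow rule match data. Only equality
 * tests whose operand fits the backing offload register can be
 * offloaded; the constant and the register mask are copied into the
 * flow key and mask at the register's offset in the dissector key
 * layout.
 */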
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
                             struct nft_flow_rule *flow,
                             const struct nft_cmp_expr *priv)
{
        struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
        union nft_cmp_offload_data _data, _datamask;
        u8 *mask = (u8 *)&flow->match.mask;
        u8 *key = (u8 *)&flow->match.key;
        u8 *data, *datamask;

        if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
                return -EOPNOTSUPP;

        if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
                nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
                nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
                data = (u8 *)&_data;
                datamask = (u8 *)&_datamask;
        } else {
                data = (u8 *)&priv->data;
                datamask = (u8 *)&reg->mask;
        }

        memcpy(key + reg->offset, data, reg->len);
        memcpy(mask + reg->offset, datamask, reg->len);

        flow->match.dissector.used_keys |= BIT_ULL(reg->key);
        flow->match.dissector.offset[reg->key] = reg->base_offset;

        if (reg->key == FLOW_DISSECTOR_KEY_META &&
            reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
            nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
                return -EOPNOTSUPP;

        nft_offload_update_dependency(ctx, &priv->data, reg->len);

        return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
                           struct nft_flow_rule *flow,
                           const struct nft_expr *expr)
{
        const struct nft_cmp_expr *priv = nft_expr_priv(expr);

        return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
        .type = &nft_cmp_type,
        .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
        .eval = nft_cmp_eval,
        .init = nft_cmp_init,
        .dump = nft_cmp_dump,
        .reduce = NFT_REDUCE_READONLY,
        .offload = nft_cmp_offload,
};

/* Calculate the mask for the nft_cmp_fast expression. On big endian the
 * mask needs to include the *upper* bytes when interpreting that data as
 * something smaller than the full u32, therefore a cpu_to_le32 is done.
 */
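/* Illustration: for len == 16, ~0U >> 16 is 0x0000ffff. cpu_to_le32()
 * leaves this unchanged on little endian but gives 0xffff0000 on big
 * endian, covering the upper bytes that hold a 16-bit value there.
 */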
static u32 nft_cmp_fast_mask(unsigned int len)
{
        __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
                                                       data) * BITS_PER_BYTE - len));

        return (__force u32)mask;
}

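/* Fast variant for (in)equality tests on values of at most 32 bits:
 * the constant and mask are precomputed here so that the eval step,
 * which is inlined in nf_tables_core, reduces to one masked u32
 * comparison.
 */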
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
                             const struct nft_expr *expr,
                             const struct nlattr * const tb[])
{
        struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        struct nft_data data;
        struct nft_data_desc desc = {
                .type = NFT_DATA_VALUE,
                .size = sizeof(data),
        };
        int err;

        err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
        if (err < 0)
                return err;

        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
        if (err < 0)
                return err;

        desc.len *= BITS_PER_BYTE;

        priv->mask = nft_cmp_fast_mask(desc.len);
        priv->data = data.data[0] & priv->mask;
        priv->len = desc.len;
        priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
        return 0;
}

static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
                                struct nft_flow_rule *flow,
                                const struct nft_expr *expr)
{
        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        struct nft_cmp_expr cmp = {
                .data = {
                        .data = {
                                [0] = priv->data,
                        },
                },
                .sreg = priv->sreg,
                .len = priv->len / BITS_PER_BYTE,
                .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
        };

        return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb,
                             const struct nft_expr *expr, bool reset)
{
        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
        enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
        struct nft_data data;

        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
                goto nla_put_failure;

        data.data[0] = priv->data;
        if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
                          NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
        .type = &nft_cmp_type,
        .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
        .eval = NULL, /* inlined */
        .init = nft_cmp_fast_init,
        .dump = nft_cmp_fast_dump,
        .reduce = NFT_REDUCE_READONLY,
        .offload = nft_cmp_fast_offload,
};

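/* Mask helpers for the 16-byte fast variant: whole 32-bit words are
 * filled with all-ones, a trailing partial word is masked through
 * nft_cmp_mask(), and the remainder of the 128-bit area is zeroed.
 */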
static u32 nft_cmp_mask(u32 bitlen)
{
        return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}

static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
        int len = bitlen / BITS_PER_BYTE;
        int i, words = len / sizeof(u32);

        for (i = 0; i < words; i++) {
                data->data[i] = 0xffffffff;
                bitlen -= sizeof(u32) * BITS_PER_BYTE;
        }

        if (len % sizeof(u32))
                data->data[i++] = nft_cmp_mask(bitlen);

        for (; i < 4; i++)
                data->data[i] = 0;
}

static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nlattr * const tb[])
{
        struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
        struct nft_data_desc desc = {
                .type = NFT_DATA_VALUE,
                .size = sizeof(priv->data),
        };
        int err;

        err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
        if (err < 0)
                return err;

        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
        if (err < 0)
                return err;

        nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
        priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
        priv->len = desc.len;

        return 0;
}

static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_expr *expr)
{
        const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
        struct nft_cmp_expr cmp = {
                .data = priv->data,
                .sreg = priv->sreg,
                .len = priv->len,
                .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
        };

        return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp16_fast_dump(struct sk_buff *skb,
                               const struct nft_expr *expr, bool reset)
{
        const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
        enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;

        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
                goto nla_put_failure;

        if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
                          NFT_DATA_VALUE, priv->len) < 0)
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

const struct nft_expr_ops nft_cmp16_fast_ops = {
        .type = &nft_cmp_type,
        .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
        .eval = NULL, /* inlined */
        .init = nft_cmp16_fast_init,
        .dump = nft_cmp16_fast_dump,
        .reduce = NFT_REDUCE_READONLY,
        .offload = nft_cmp16_fast_offload,
};

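/* Pick the most specific implementation: nft_cmp_fast_ops for
 * (in)equality on at most 32 bits, nft_cmp16_fast_ops for (in)equality
 * on at most 128 bits in suitably aligned registers, and the generic
 * nft_cmp_ops for everything else.
 */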
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
        struct nft_data data;
        struct nft_data_desc desc = {
                .type = NFT_DATA_VALUE,
                .size = sizeof(data),
        };
        enum nft_cmp_ops op;
        u8 sreg;
        int err;

        if (tb[NFTA_CMP_SREG] == NULL ||
            tb[NFTA_CMP_OP] == NULL ||
            tb[NFTA_CMP_DATA] == NULL)
                return ERR_PTR(-EINVAL);

        op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
        switch (op) {
        case NFT_CMP_EQ:
        case NFT_CMP_NEQ:
        case NFT_CMP_LT:
        case NFT_CMP_LTE:
        case NFT_CMP_GT:
        case NFT_CMP_GTE:
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
        if (err < 0)
                return ERR_PTR(err);

        sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

        if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
                if (desc.len <= sizeof(u32))
                        return &nft_cmp_fast_ops;
                else if (desc.len <= sizeof(data) &&
                         ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
                          (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
                        return &nft_cmp16_fast_ops;
        }
        return &nft_cmp_ops;
}

struct nft_expr_type nft_cmp_type __read_mostly = {
        .name = "cmp",
        .select_ops = nft_cmp_select_ops,
        .policy = nft_cmp_policy,
        .maxattr = NFTA_CMP_MAX,
        .owner = THIS_MODULE,
};