1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org> |
4 | */ |
5 | |
6 | #include <linux/kernel.h> |
7 | #include <linux/init.h> |
8 | #include <linux/module.h> |
9 | #include <linux/atomic.h> |
10 | #include <linux/netlink.h> |
11 | #include <linux/netfilter.h> |
12 | #include <linux/netfilter/nf_tables.h> |
13 | #include <net/netfilter/nf_tables.h> |
14 | |
/* Per-instance state shared by the "quota" expression and the quota object.
 *
 * @quota:    the byte limit; atomic64_t because nft_quota_obj_update()
 *            replaces it at runtime while packets are being accounted
 * @flags:    NFT_QUOTA_F_INV from userspace, plus the internal depleted
 *            bit (NFT_QUOTA_DEPLETED_BIT) manipulated via set/clear_bit
 * @consumed: bytes seen so far; separately heap-allocated (see
 *            nft_quota_do_init/nft_quota_clone) rather than embedded
 */
struct nft_quota {
	atomic64_t quota;
	unsigned long flags;
	atomic64_t *consumed;
};
20 | |
21 | static inline bool nft_overquota(struct nft_quota *priv, |
22 | const struct sk_buff *skb) |
23 | { |
24 | return atomic64_add_return(i: skb->len, v: priv->consumed) >= |
25 | atomic64_read(v: &priv->quota); |
26 | } |
27 | |
28 | static inline bool nft_quota_invert(struct nft_quota *priv) |
29 | { |
30 | return priv->flags & NFT_QUOTA_F_INV; |
31 | } |
32 | |
33 | static inline void nft_quota_do_eval(struct nft_quota *priv, |
34 | struct nft_regs *regs, |
35 | const struct nft_pktinfo *pkt) |
36 | { |
37 | if (nft_overquota(priv, skb: pkt->skb) ^ nft_quota_invert(priv)) |
38 | regs->verdict.code = NFT_BREAK; |
39 | } |
40 | |
/* Netlink attribute policy for parsing NFTA_QUOTA_* attributes. */
static const struct nla_policy nft_quota_policy[NFTA_QUOTA_MAX + 1] = {
	[NFTA_QUOTA_BYTES] = { .type = NLA_U64 },
	[NFTA_QUOTA_FLAGS] = { .type = NLA_U32 },
	[NFTA_QUOTA_CONSUMED] = { .type = NLA_U64 },
};
46 | |
/* Bit number in nft_quota->flags used with test_and_set_bit/clear_bit to
 * make the depletion notification one-shot; bit 1 matches the
 * NFT_QUOTA_F_DEPLETED flag value (1 << 1).
 */
#define NFT_QUOTA_DEPLETED_BIT 1 /* From NFT_QUOTA_F_DEPLETED. */
48 | |
49 | static void nft_quota_obj_eval(struct nft_object *obj, |
50 | struct nft_regs *regs, |
51 | const struct nft_pktinfo *pkt) |
52 | { |
53 | struct nft_quota *priv = nft_obj_data(obj); |
54 | bool overquota; |
55 | |
56 | overquota = nft_overquota(priv, skb: pkt->skb); |
57 | if (overquota ^ nft_quota_invert(priv)) |
58 | regs->verdict.code = NFT_BREAK; |
59 | |
60 | if (overquota && |
61 | !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, addr: &priv->flags)) |
62 | nft_obj_notify(net: nft_net(pkt), table: obj->key.table, obj, portid: 0, seq: 0, |
63 | event: NFT_MSG_NEWOBJ, flags: 0, family: nft_pf(pkt), report: 0, GFP_ATOMIC); |
64 | } |
65 | |
66 | static int nft_quota_do_init(const struct nlattr * const tb[], |
67 | struct nft_quota *priv) |
68 | { |
69 | unsigned long flags = 0; |
70 | u64 quota, consumed = 0; |
71 | |
72 | if (!tb[NFTA_QUOTA_BYTES]) |
73 | return -EINVAL; |
74 | |
75 | quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES])); |
76 | if (quota > S64_MAX) |
77 | return -EOVERFLOW; |
78 | |
79 | if (tb[NFTA_QUOTA_CONSUMED]) { |
80 | consumed = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_CONSUMED])); |
81 | if (consumed > quota) |
82 | return -EINVAL; |
83 | } |
84 | |
85 | if (tb[NFTA_QUOTA_FLAGS]) { |
86 | flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS])); |
87 | if (flags & ~NFT_QUOTA_F_INV) |
88 | return -EINVAL; |
89 | if (flags & NFT_QUOTA_F_DEPLETED) |
90 | return -EOPNOTSUPP; |
91 | } |
92 | |
93 | priv->consumed = kmalloc(size: sizeof(*priv->consumed), GFP_KERNEL_ACCOUNT); |
94 | if (!priv->consumed) |
95 | return -ENOMEM; |
96 | |
97 | atomic64_set(v: &priv->quota, i: quota); |
98 | priv->flags = flags; |
99 | atomic64_set(v: priv->consumed, i: consumed); |
100 | |
101 | return 0; |
102 | } |
103 | |
104 | static void nft_quota_do_destroy(const struct nft_ctx *ctx, |
105 | struct nft_quota *priv) |
106 | { |
107 | kfree(objp: priv->consumed); |
108 | } |
109 | |
/* Object-side init: delegate to the shared parser. */
static int nft_quota_obj_init(const struct nft_ctx *ctx,
			      const struct nlattr * const tb[],
			      struct nft_object *obj)
{
	return nft_quota_do_init(tb, nft_obj_data(obj));
}
118 | |
119 | static void nft_quota_obj_update(struct nft_object *obj, |
120 | struct nft_object *newobj) |
121 | { |
122 | struct nft_quota *newpriv = nft_obj_data(obj: newobj); |
123 | struct nft_quota *priv = nft_obj_data(obj); |
124 | u64 newquota; |
125 | |
126 | newquota = atomic64_read(v: &newpriv->quota); |
127 | atomic64_set(v: &priv->quota, i: newquota); |
128 | priv->flags = newpriv->flags; |
129 | } |
130 | |
131 | static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv, |
132 | bool reset) |
133 | { |
134 | u64 consumed, consumed_cap, quota; |
135 | u32 flags = priv->flags; |
136 | |
137 | /* Since we inconditionally increment consumed quota for each packet |
138 | * that we see, don't go over the quota boundary in what we send to |
139 | * userspace. |
140 | */ |
141 | consumed = atomic64_read(v: priv->consumed); |
142 | quota = atomic64_read(v: &priv->quota); |
143 | if (consumed >= quota) { |
144 | consumed_cap = quota; |
145 | flags |= NFT_QUOTA_F_DEPLETED; |
146 | } else { |
147 | consumed_cap = consumed; |
148 | } |
149 | |
150 | if (nla_put_be64(skb, attrtype: NFTA_QUOTA_BYTES, cpu_to_be64(quota), |
151 | padattr: NFTA_QUOTA_PAD) || |
152 | nla_put_be64(skb, attrtype: NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap), |
153 | padattr: NFTA_QUOTA_PAD) || |
154 | nla_put_be32(skb, attrtype: NFTA_QUOTA_FLAGS, htonl(flags))) |
155 | goto nla_put_failure; |
156 | |
157 | if (reset) { |
158 | atomic64_sub(i: consumed, v: priv->consumed); |
159 | clear_bit(NFT_QUOTA_DEPLETED_BIT, addr: &priv->flags); |
160 | } |
161 | return 0; |
162 | |
163 | nla_put_failure: |
164 | return -1; |
165 | } |
166 | |
167 | static int nft_quota_obj_dump(struct sk_buff *skb, struct nft_object *obj, |
168 | bool reset) |
169 | { |
170 | struct nft_quota *priv = nft_obj_data(obj); |
171 | |
172 | return nft_quota_do_dump(skb, priv, reset); |
173 | } |
174 | |
/* Object-side teardown: delegate to the shared destroy helper.
 *
 * Fix: the original wrote "return nft_quota_do_destroy(...);" — a return
 * statement with an expression is not allowed in a function returning void
 * (C11 6.8.6.4p1); GCC only tolerates it as an extension.
 */
static void nft_quota_obj_destroy(const struct nft_ctx *ctx,
				  struct nft_object *obj)
{
	struct nft_quota *priv = nft_obj_data(obj);

	nft_quota_do_destroy(ctx, priv);
}
182 | |
static struct nft_object_type nft_quota_obj_type;
/* Operations backing the named quota object ("add quota ..."). */
static const struct nft_object_ops nft_quota_obj_ops = {
	.type = &nft_quota_obj_type,
	.size = sizeof(struct nft_quota),
	.init = nft_quota_obj_init,
	.destroy = nft_quota_obj_destroy,
	.eval = nft_quota_obj_eval,
	.dump = nft_quota_obj_dump,
	.update = nft_quota_obj_update,
};
193 | |
/* Registration record binding NFT_OBJECT_QUOTA to the ops above. */
static struct nft_object_type nft_quota_obj_type __read_mostly = {
	.type = NFT_OBJECT_QUOTA,
	.ops = &nft_quota_obj_ops,
	.maxattr = NFTA_QUOTA_MAX,
	.policy = nft_quota_policy,
	.owner = THIS_MODULE,
};
201 | |
/* Expression-side evaluation: delegate to the shared eval helper. */
static void nft_quota_eval(const struct nft_expr *expr,
			   struct nft_regs *regs,
			   const struct nft_pktinfo *pkt)
{
	nft_quota_do_eval(nft_expr_priv(expr), regs, pkt);
}
210 | |
/* Expression-side init: delegate to the shared parser. */
static int nft_quota_init(const struct nft_ctx *ctx,
			  const struct nft_expr *expr,
			  const struct nlattr * const tb[])
{
	return nft_quota_do_init(tb, nft_expr_priv(expr));
}
219 | |
220 | static int nft_quota_dump(struct sk_buff *skb, |
221 | const struct nft_expr *expr, bool reset) |
222 | { |
223 | struct nft_quota *priv = nft_expr_priv(expr); |
224 | |
225 | return nft_quota_do_dump(skb, priv, reset); |
226 | } |
227 | |
/* Expression-side teardown: delegate to the shared destroy helper.
 *
 * Fix: the original wrote "return nft_quota_do_destroy(...);" — a return
 * statement with an expression is not allowed in a function returning void
 * (C11 6.8.6.4p1); GCC only tolerates it as an extension.
 */
static void nft_quota_destroy(const struct nft_ctx *ctx,
			      const struct nft_expr *expr)
{
	struct nft_quota *priv = nft_expr_priv(expr);

	nft_quota_do_destroy(ctx, priv);
}
235 | |
236 | static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src) |
237 | { |
238 | struct nft_quota *priv_dst = nft_expr_priv(expr: dst); |
239 | struct nft_quota *priv_src = nft_expr_priv(expr: src); |
240 | |
241 | priv_dst->quota = priv_src->quota; |
242 | priv_dst->flags = priv_src->flags; |
243 | |
244 | priv_dst->consumed = kmalloc(size: sizeof(*priv_dst->consumed), GFP_ATOMIC); |
245 | if (!priv_dst->consumed) |
246 | return -ENOMEM; |
247 | |
248 | *priv_dst->consumed = *priv_src->consumed; |
249 | |
250 | return 0; |
251 | } |
252 | |
static struct nft_expr_type nft_quota_type;
/* Operations backing the anonymous "quota" rule expression. */
static const struct nft_expr_ops nft_quota_ops = {
	.type = &nft_quota_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_quota)),
	.eval = nft_quota_eval,
	.init = nft_quota_init,
	.destroy = nft_quota_destroy,
	.clone = nft_quota_clone,
	.dump = nft_quota_dump,
	.reduce = NFT_REDUCE_READONLY,
};
264 | |
/* Registration record for the "quota" expression; NFT_EXPR_STATEFUL marks
 * it as carrying per-rule runtime state.
 */
static struct nft_expr_type nft_quota_type __read_mostly = {
	.name = "quota",
	.ops = &nft_quota_ops,
	.policy = nft_quota_policy,
	.maxattr = NFTA_QUOTA_MAX,
	.flags = NFT_EXPR_STATEFUL,
	.owner = THIS_MODULE,
};
273 | |
274 | static int __init nft_quota_module_init(void) |
275 | { |
276 | int err; |
277 | |
278 | err = nft_register_obj(obj_type: &nft_quota_obj_type); |
279 | if (err < 0) |
280 | return err; |
281 | |
282 | err = nft_register_expr(&nft_quota_type); |
283 | if (err < 0) |
284 | goto err1; |
285 | |
286 | return 0; |
287 | err1: |
288 | nft_unregister_obj(obj_type: &nft_quota_obj_type); |
289 | return err; |
290 | } |
291 | |
292 | static void __exit nft_quota_module_exit(void) |
293 | { |
294 | nft_unregister_expr(&nft_quota_type); |
295 | nft_unregister_obj(obj_type: &nft_quota_obj_type); |
296 | } |
297 | |
module_init(nft_quota_module_init);
module_exit(nft_quota_module_exit);

/* Module metadata; the aliases let the nf_tables core autoload this
 * module when a "quota" expression or NFT_OBJECT_QUOTA object is used.
 */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("quota");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
MODULE_DESCRIPTION("Netfilter nftables quota module");
306 | |