// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

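/* Validate that a flower police action can be offloaded. The hardware
 * policers only support a "drop" exceed action, a "pipe" or "ok" conform
 * action (the latter only as the last action in the rule), byte-based rates
 * and no peakrate/avrate/overhead, so anything else is rejected with
 * -EOPNOTSUPP and an extack message.
 *
 * Illustrative tc rule that satisfies these constraints (the port name
 * swp1 is only an example):
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *           action police rate 100mbit burst 16k conform-exceed drop/pipe
 */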
static int mlxsw_sp_policer_validate(const struct flow_action *action,
				     const struct flow_action_entry *act,
				     struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

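/* Translate the flower action list into mlxsw ACL rule actions. A counter
 * action is prepended when "immediate" hardware stats are requested, and at
 * most one mirror, one police and one sample action are accepted per rule.
 *
 * Illustrative tc rule exercising the drop path (swp1 is only an example
 * port name):
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *           dst_ip 192.0.2.1 action drop
 */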
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int sample_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->user_cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			/* Ignore learning and security lookup as redirection
			 * using ingress filters happens before the bridge.
			 */
			err = mlxsw_sp_acl_rulei_act_ignore(mlxsw_sp, rulei,
							    true, true);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append ignore action");
				return err;
			}

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->hw_index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_SAMPLE: {
			if (sample_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
							    block,
							    act->sample.psample_group,
							    act->sample.rate,
							    act->sample.trunc_size,
							    act->sample.truncate,
							    extack);
			if (err)
				return err;
			break;
		}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	if (rulei->ipv6_valid) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
		return -EOPNOTSUPP;
	}

	return 0;
}

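/* Match on the ingress port carried in the meta dissector key. Only an
 * exact-match ifindex of a port belonging to the same mlxsw device is
 * supported; it is translated into the SRC_SYS_PORT flex key element.
 */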
static int
mlxsw_sp_flower_parse_meta_iif(struct mlxsw_sp_acl_rule_info *rulei,
			       const struct mlxsw_sp_flow_block *block,
			       const struct flow_match_meta *match,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;

	if (!match->mask->ingress_ifindex)
		return 0;

	if (match->mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match->key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);

	return 0;
}

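/* Parse the meta dissector key: the l2_miss bit maps to the FDB_MISS flex
 * key element and the ingress ifindex is handled by
 * mlxsw_sp_flower_parse_meta_iif() above.
 */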
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_FDB_MISS,
				       match.key->l2_miss, match.mask->l2_miss);

	return mlxsw_sp_flower_parse_meta_iif(rulei, block, &match,
					      f->common.extack);
}

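/* Copy the IPv4 source and destination addresses into the 32-bit
 * SRC_IP_0_31 and DST_IP_0_31 flex key elements.
 */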
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

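/* An IPv6 address does not fit into a single flex key element, so each of
 * the source and destination addresses is split into four 32-bit chunks
 * (bits 96-127 down to 0-31), taken from consecutive 4-byte slices of
 * s6_addr.
 */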
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

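/* Match on exact L4 source/destination ports. Port keys are only accepted
 * together with an ip_proto match on TCP or UDP, for example (swp1 is only
 * an illustrative port name):
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *           ip_proto tcp dst_port 80 action drop
 */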
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

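/* Match on L4 port ranges. Each requested range is programmed into a
 * hardware port range register and the rule then matches on that register's
 * bit in the L4_PORT_RANGE flex key element. An illustrative rule (swp1 is
 * only an example port name):
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *           ip_proto udp dst_port 1000-2000 action drop
 */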
static int
mlxsw_sp_flower_parse_ports_range(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct flow_cls_offload *f, u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports_range match;
	u32 key_mask_value = 0;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		return -EINVAL;
	}

	flow_rule_match_ports_range(rule, &match);

	if (match.mask->tp_min.src) {
		struct mlxsw_sp_port_range range = {
			.min = ntohs(match.key->tp_min.src),
			.max = ntohs(match.key->tp_max.src),
			.source = true,
		};
		u8 prr_index;
		int err;

		err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
						  f->common.extack, &prr_index);
		if (err)
			return err;

		rulei->src_port_range_reg_index = prr_index;
		rulei->src_port_range_reg_valid = true;
		key_mask_value |= BIT(prr_index);
	}

	if (match.mask->tp_min.dst) {
		struct mlxsw_sp_port_range range = {
			.min = ntohs(match.key->tp_min.dst),
			.max = ntohs(match.key->tp_max.dst),
		};
		u8 prr_index;
		int err;

		err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
						  f->common.extack, &prr_index);
		if (err)
			return err;

		rulei->dst_port_range_reg_index = prr_index;
		rulei->dst_port_range_reg_valid = true;
		key_mask_value |= BIT(prr_index);
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
				       key_mask_value, key_mask_value);

	return 0;
}

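/* Match on TCP flags. A match is only accepted together with an ip_proto
 * match on TCP, and matching on the reserved flag bits (mask 0x0E00) is
 * refused.
 */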
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

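/* Match on IP header fields. The TTL (hop limit for IPv6) is used as-is,
 * while the tos byte is split into its two parts: the low two bits go into
 * the ECN element and the upper six bits into the DSCP element.
 */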
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

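/* Top-level flower parser: reject dissector keys the driver cannot offload,
 * set the rule priority and translate every supported match (meta, basic,
 * Ethernet, VLAN, IPv4/IPv6 addresses, L4 ports and ranges, TCP flags and
 * IP header fields) into flex key elements before handing the action list
 * to mlxsw_sp_flower_parse_actions().
 */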
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block) &&
		    match.mask->vlan_id) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ports_range(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

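/* Check the new flower rule's priority against matchall rules installed on
 * the same chain: on ingress a flower rule may not be added in front of
 * existing matchall rules, and on egress it may not be added behind them.
 */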
static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

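/* Entry point for FLOW_CLS_REPLACE. The flow is: check priorities against
 * matchall, get (or create) the ruleset for the chain, create the rule,
 * parse matches and actions into it, commit the rule info and finally add
 * the rule to hardware. On success the reference taken here by
 * mlxsw_sp_acl_ruleset_get() is released again before returning.
 */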
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

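/* Entry point for FLOW_CLS_STATS. The rule is looked up by the flower
 * cookie and its hardware counters (packets, bytes, drops, last use and the
 * stats type in use) are reported back through flow_stats_update().
 */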
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

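/* Entry point for FLOW_CLS_TMPLT_CREATE. The template is parsed into a
 * temporary rule info only to learn which flex key elements it uses; the
 * ruleset reference taken here is intentionally kept and is released in
 * mlxsw_sp_flower_tmplt_destroy().
 */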
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}