// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

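/* Per-ASIC ACL state: flexible-key infrastructure, the ruleset hash table,
 * the list of all installed rules (protected by rules_lock) and the delayed
 * work that periodically polls rule activity.
 */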
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl)
{
	return &acl->tcam;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

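/* In addition to the reference the current caller holds (see the comment
 * below), a "singular" ruleset is referenced by exactly one more user,
 * i.e. it is not shared.
 */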
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

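/* Bind the ruleset to every port binding of the flow block. On failure,
 * unwind the bindings that already succeeded so the block is left
 * unchanged.
 */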
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

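/* Allocate a ruleset together with the profile-specific private area,
 * create its rule hash table, let the profile ops add it to the TCAM and
 * make it discoverable through the ruleset hash table.
 */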
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

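/* Look up a ruleset for the given block/chain/profile and take a reference
 * on it; if none exists yet, create one.
 */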
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

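/* Allocate rule info. If the caller passed a pre-built flexible-action
 * block, reuse it; otherwise create a fresh one and mark it as owned so
 * it is destroyed together with the rule info.
 */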
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	if (rulei->src_port_range_reg_valid)
		mlxsw_sp_port_range_reg_put(mlxsw_sp,
					    rulei->src_port_range_reg_index);
	if (rulei->dst_port_range_reg_valid)
		mlxsw_sp_port_range_reg_put(mlxsw_sp,
					    rulei->dst_port_range_reg_index);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)

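/* Each entry mirrors one u32 word of a pedit mangle action: the header
 * type, the u32-aligned offset of the word, the mask with the bits of the
 * modified field cleared, the shift needed to extract the new value, and
 * the field the word maps to.
 */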
static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

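/* IPv6 addresses are rewritten 64 bits at a time, but pedit hands them
 * over as 32-bit words. Stash the odd (first) word of a pair and emit the
 * hardware action only when the matching even word arrives.
 */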
static int
mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
					    enum mlxsw_sp_acl_mangle_field field,
					    u32 val, struct netlink_ext_ack *extack)
{
	if (!rulei->ipv6_valid) {
		rulei->ipv6.prev_val = val;
		rulei->ipv6_valid = true;
		rulei->ipv6.prev_field = field;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
	return -EOPNOTSUPP;
}

static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
	/* IPv4 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, false,
						 true, val, 0, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, true,
						 true, val, 0, extack);
	/* IPv6 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
		return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
								   mact->field,
								   val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

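/* Translate a pedit mangle request into a hardware action: find the table
 * entry matching the header type, offset and mask, extract the new field
 * value and hand it to the per-ASIC mangle callback.
 */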
int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return acl_rulei_ops->act_mangle_field(mlxsw_sp,
							       rulei, mact,
							       val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  bool disable_learning, bool disable_security)
{
	return mlxsw_afa_block_append_ignore(rulei->act_block,
					     disable_learning,
					     disable_security);
}

int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct psample_group *psample_group, u32 rate,
				  u32 trunc_size, bool truncate,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	mlxsw_sp_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_sampler(rulei->act_block,
					      mlxsw_sp_port->local_port,
					      psample_group, rate, trunc_size,
					      truncate, binding->ingress,
					      extack);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

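/* Install the rule in hardware, make it visible for lookup and activity
 * polling, and update the flow block's rule accounting. The implicit
 * chain 0 ruleset is also bound directly to the device when it is not
 * shared.
 */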
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need the ruleset with chain index 0, the implicit
		 * one, to be directly bound to the device. The rest of the
		 * rulesets are reached through "goto" actions.
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

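/* Delayed-work handler: poll activity of all installed rules and re-arm
 * itself so polling continues at the configured interval.
 */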
static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

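/* Report per-rule statistics as deltas since the previous call: read the
 * current flow counter and policer drop counter from hardware, subtract
 * the values cached on the rule and update the cache.
 */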
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *drops,
				u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)

{
	enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	u64 current_drops = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	if (rulei->policer_index_valid) {
		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
							 rulei->policer_index,
							 &current_drops);
		if (err)
			return err;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*drops = current_drops - rule->last_drops;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;
	rule->last_drops = current_drops;

	return 0;
}

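/* Set up the per-ASIC ACL state: flexible-key infrastructure, ruleset hash
 * table, dummy FID, TCAM, and the periodic rule-activity work.
 */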
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};

struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};
