1 | /* |
2 | * Copyright (c) 2015, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #include <linux/mutex.h> |
34 | #include <linux/mlx5/driver.h> |
35 | #include <linux/mlx5/vport.h> |
36 | #include <linux/mlx5/eswitch.h> |
37 | #include <net/devlink.h> |
38 | |
39 | #include "mlx5_core.h" |
40 | #include "fs_core.h" |
41 | #include "fs_cmd.h" |
42 | #include "fs_ft_pool.h" |
43 | #include "diag/fs_tracepoint.h" |
44 | #include "devlink.h" |
45 | |
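/* Helpers for declaring the static init_tree_node trees below. Each
 * ADD_PRIO()/ADD_NS() expands to a struct init_tree_node compound literal,
 * and INIT_TREE_NODE_ARRAY_SIZE() counts the variadic children at compile
 * time.
 */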
46 | #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ |
47 | sizeof(struct init_tree_node)) |
48 | |
49 | #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\ |
50 | ...) {.type = FS_TYPE_PRIO,\ |
51 | .min_ft_level = min_level_val,\ |
52 | .num_levels = num_levels_val,\ |
53 | .num_leaf_prios = num_prios_val,\ |
54 | .caps = caps_val,\ |
55 | .children = (struct init_tree_node[]) {__VA_ARGS__},\ |
56 | .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ |
57 | } |
58 | |
59 | #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\ |
60 | ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ |
		 __VA_ARGS__)
62 | |
63 | #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \ |
64 | .def_miss_action = def_miss_act,\ |
65 | .children = (struct init_tree_node[]) {__VA_ARGS__},\ |
66 | .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ |
67 | } |
68 | |
69 | #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\ |
70 | sizeof(long)) |
71 | |
72 | #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap)) |
73 | |
74 | #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ |
75 | .caps = (long[]) {__VA_ARGS__} } |
76 | |
77 | #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \ |
78 | FS_CAP(flow_table_properties_nic_receive.modify_root), \ |
79 | FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \ |
80 | FS_CAP(flow_table_properties_nic_receive.flow_table_modify)) |
81 | |
82 | #define FS_CHAINING_CAPS_EGRESS \ |
83 | FS_REQUIRED_CAPS( \ |
84 | FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \ |
85 | FS_CAP(flow_table_properties_nic_transmit.modify_root), \ |
86 | FS_CAP(flow_table_properties_nic_transmit \ |
87 | .identified_miss_table_mode), \ |
88 | FS_CAP(flow_table_properties_nic_transmit.flow_table_modify)) |
89 | |
90 | #define FS_CHAINING_CAPS_RDMA_TX \ |
91 | FS_REQUIRED_CAPS( \ |
92 | FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \ |
93 | FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \ |
94 | FS_CAP(flow_table_properties_nic_transmit_rdma \ |
95 | .identified_miss_table_mode), \ |
96 | FS_CAP(flow_table_properties_nic_transmit_rdma \ |
97 | .flow_table_modify)) |
98 | |
99 | #define LEFTOVERS_NUM_LEVELS 1 |
100 | #define LEFTOVERS_NUM_PRIOS 1 |
101 | |
102 | #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1 |
103 | #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1 |
104 | |
105 | #define BY_PASS_PRIO_NUM_LEVELS 1 |
106 | #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ |
107 | LEFTOVERS_NUM_PRIOS) |
108 | |
109 | #define KERNEL_RX_MACSEC_NUM_PRIOS 1 |
110 | #define KERNEL_RX_MACSEC_NUM_LEVELS 3 |
111 | #define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS) |
112 | |
113 | #define ETHTOOL_PRIO_NUM_LEVELS 1 |
114 | #define ETHTOOL_NUM_PRIOS 11 |
115 | #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) |
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}},
 * IPsec policy, {IPsec RoCE MPV, Alias table}, IPsec RoCE policy
 */
119 | #define KERNEL_NIC_PRIO_NUM_LEVELS 11 |
120 | #define KERNEL_NIC_NUM_PRIOS 1 |
121 | /* One more level for tc */ |
122 | #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) |
123 | |
124 | #define KERNEL_NIC_TC_NUM_PRIOS 1 |
125 | #define KERNEL_NIC_TC_NUM_LEVELS 3 |
126 | |
127 | #define ANCHOR_NUM_LEVELS 1 |
128 | #define ANCHOR_NUM_PRIOS 1 |
129 | #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) |
130 | |
131 | #define OFFLOADS_MAX_FT 2 |
132 | #define OFFLOADS_NUM_PRIOS 2 |
133 | #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS) |
134 | |
135 | #define LAG_PRIO_NUM_LEVELS 1 |
136 | #define LAG_NUM_PRIOS 1 |
137 | #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) |
138 | |
139 | #define KERNEL_TX_IPSEC_NUM_PRIOS 1 |
140 | #define KERNEL_TX_IPSEC_NUM_LEVELS 4 |
141 | #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) |
142 | |
143 | #define KERNEL_TX_MACSEC_NUM_PRIOS 1 |
144 | #define KERNEL_TX_MACSEC_NUM_LEVELS 2 |
145 | #define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS) |
146 | |
147 | struct node_caps { |
148 | size_t arr_sz; |
149 | long *caps; |
150 | }; |
151 | |
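/* Static description of the NIC RX steering tree: each priority declares the
 * minimum flow-table level it needs, the device capabilities it requires and
 * the namespaces/priorities nested under it. This description is walked at
 * init time to build the actual namespace and priority objects.
 */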
152 | static struct init_tree_node { |
153 | enum fs_node_type type; |
154 | struct init_tree_node *children; |
155 | int ar_size; |
156 | struct node_caps caps; |
157 | int min_ft_level; |
158 | int num_leaf_prios; |
159 | int prio; |
160 | int num_levels; |
161 | enum mlx5_flow_table_miss_action def_miss_action; |
162 | } root_fs = { |
163 | .type = FS_TYPE_NAMESPACE, |
164 | .ar_size = 8, |
165 | .children = (struct init_tree_node[]){ |
166 | ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
167 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
168 | ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, |
169 | BY_PASS_PRIO_NUM_LEVELS))), |
170 | ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
171 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
172 | ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS, |
173 | KERNEL_RX_MACSEC_NUM_LEVELS))), |
174 | ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
175 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
176 | ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, |
177 | LAG_PRIO_NUM_LEVELS))), |
178 | ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
179 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
180 | ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, |
181 | OFFLOADS_MAX_FT))), |
182 | ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
183 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
184 | ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, |
185 | ETHTOOL_PRIO_NUM_LEVELS))), |
186 | ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, |
187 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
188 | ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, |
189 | KERNEL_NIC_TC_NUM_LEVELS), |
190 | ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, |
191 | KERNEL_NIC_PRIO_NUM_LEVELS))), |
192 | ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, |
193 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
194 | ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, |
195 | LEFTOVERS_NUM_LEVELS))), |
196 | ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, |
197 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
198 | ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, |
199 | ANCHOR_NUM_LEVELS))), |
200 | } |
201 | }; |
202 | |
203 | static struct init_tree_node egress_root_fs = { |
204 | .type = FS_TYPE_NAMESPACE, |
205 | .ar_size = 3, |
206 | .children = (struct init_tree_node[]) { |
207 | ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, |
208 | FS_CHAINING_CAPS_EGRESS, |
209 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
210 | ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, |
211 | BY_PASS_PRIO_NUM_LEVELS))), |
212 | ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0, |
213 | FS_CHAINING_CAPS_EGRESS, |
214 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
215 | ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS, |
216 | KERNEL_TX_IPSEC_NUM_LEVELS))), |
217 | ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0, |
218 | FS_CHAINING_CAPS_EGRESS, |
219 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
220 | ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS, |
221 | KERNEL_TX_MACSEC_NUM_LEVELS))), |
222 | } |
223 | }; |
224 | |
225 | enum { |
226 | RDMA_RX_IPSEC_PRIO, |
227 | RDMA_RX_MACSEC_PRIO, |
228 | RDMA_RX_COUNTERS_PRIO, |
229 | RDMA_RX_BYPASS_PRIO, |
230 | RDMA_RX_KERNEL_PRIO, |
231 | }; |
232 | |
233 | #define RDMA_RX_IPSEC_NUM_PRIOS 1 |
234 | #define RDMA_RX_IPSEC_NUM_LEVELS 4 |
235 | #define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS) |
236 | |
237 | #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS |
238 | #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) |
239 | #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) |
240 | |
241 | #define RDMA_RX_MACSEC_NUM_PRIOS 1 |
242 | #define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2 |
243 | #define RDMA_RX_MACSEC_MIN_LEVEL (RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS) |
244 | |
245 | static struct init_tree_node rdma_rx_root_fs = { |
246 | .type = FS_TYPE_NAMESPACE, |
247 | .ar_size = 5, |
248 | .children = (struct init_tree_node[]) { |
249 | [RDMA_RX_IPSEC_PRIO] = |
250 | ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0, |
251 | FS_CHAINING_CAPS, |
252 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
253 | ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS, |
254 | RDMA_RX_IPSEC_NUM_LEVELS))), |
255 | [RDMA_RX_MACSEC_PRIO] = |
256 | ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0, |
257 | FS_CHAINING_CAPS, |
258 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
259 | ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS, |
260 | RDMA_RX_MACSEC_PRIO_NUM_LEVELS))), |
261 | [RDMA_RX_COUNTERS_PRIO] = |
262 | ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, |
263 | FS_CHAINING_CAPS, |
264 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
265 | ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS, |
266 | RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))), |
267 | [RDMA_RX_BYPASS_PRIO] = |
268 | ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0, |
269 | FS_CHAINING_CAPS, |
270 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
271 | ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, |
272 | BY_PASS_PRIO_NUM_LEVELS))), |
273 | [RDMA_RX_KERNEL_PRIO] = |
274 | ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0, |
275 | FS_CHAINING_CAPS, |
276 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, |
277 | ADD_MULTIPLE_PRIO(1, 1))), |
278 | } |
279 | }; |
280 | |
281 | enum { |
282 | RDMA_TX_COUNTERS_PRIO, |
283 | RDMA_TX_IPSEC_PRIO, |
284 | RDMA_TX_MACSEC_PRIO, |
285 | RDMA_TX_BYPASS_PRIO, |
286 | }; |
287 | |
288 | #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS |
289 | #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) |
290 | |
291 | #define RDMA_TX_IPSEC_NUM_PRIOS 2 |
292 | #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 |
293 | #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) |
294 | |
295 | #define RDMA_TX_MACSEC_NUM_PRIOS 1 |
#define RDMA_TX_MACSEC_PRIO_NUM_LEVELS 1
297 | #define RDMA_TX_MACSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS) |
298 | |
299 | static struct init_tree_node rdma_tx_root_fs = { |
300 | .type = FS_TYPE_NAMESPACE, |
301 | .ar_size = 4, |
302 | .children = (struct init_tree_node[]) { |
303 | [RDMA_TX_COUNTERS_PRIO] = |
304 | ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, |
305 | FS_CHAINING_CAPS, |
306 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
307 | ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS, |
308 | RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))), |
309 | [RDMA_TX_IPSEC_PRIO] = |
310 | ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0, |
311 | FS_CHAINING_CAPS, |
312 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
313 | ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS, |
314 | RDMA_TX_IPSEC_PRIO_NUM_LEVELS))), |
315 | [RDMA_TX_MACSEC_PRIO] = |
316 | ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0, |
317 | FS_CHAINING_CAPS, |
318 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
319 | ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS, |
						  RDMA_TX_MACSEC_PRIO_NUM_LEVELS))),
321 | [RDMA_TX_BYPASS_PRIO] = |
322 | ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, |
323 | FS_CHAINING_CAPS_RDMA_TX, |
324 | ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, |
325 | ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL, |
326 | BY_PASS_PRIO_NUM_LEVELS))), |
327 | } |
328 | }; |
329 | |
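/* Lockdep subclasses for the nested fs_node rwsem acquisitions
 * (grandparent -> parent -> child), so that taking the lock on more than
 * one tree level at a time does not trigger false lockdep warnings.
 */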
330 | enum fs_i_lock_class { |
331 | FS_LOCK_GRANDPARENT, |
332 | FS_LOCK_PARENT, |
333 | FS_LOCK_CHILD |
334 | }; |
335 | |
336 | static const struct rhashtable_params rhash_fte = { |
337 | .key_len = sizeof_field(struct fs_fte, val), |
338 | .key_offset = offsetof(struct fs_fte, val), |
339 | .head_offset = offsetof(struct fs_fte, hash), |
340 | .automatic_shrinking = true, |
341 | .min_size = 1, |
342 | }; |
343 | |
344 | static const struct rhashtable_params rhash_fg = { |
345 | .key_len = sizeof_field(struct mlx5_flow_group, mask), |
346 | .key_offset = offsetof(struct mlx5_flow_group, mask), |
347 | .head_offset = offsetof(struct mlx5_flow_group, hash), |
348 | .automatic_shrinking = true, |
	.min_size = 1,
};
352 | |
353 | static void del_hw_flow_table(struct fs_node *node); |
354 | static void del_hw_flow_group(struct fs_node *node); |
355 | static void del_hw_fte(struct fs_node *node); |
356 | static void del_sw_flow_table(struct fs_node *node); |
357 | static void del_sw_flow_group(struct fs_node *node); |
358 | static void del_sw_fte(struct fs_node *node); |
359 | static void del_sw_prio(struct fs_node *node); |
360 | static void del_sw_ns(struct fs_node *node); |
/* Deleting a rule (destination) is a special case that requires
 * holding the FTE lock for the entire deletion process.
 */
364 | static void del_sw_hw_rule(struct fs_node *node); |
365 | static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, |
366 | struct mlx5_flow_destination *d2); |
367 | static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); |
368 | static struct mlx5_flow_rule * |
369 | find_flow_rule(struct fs_fte *fte, |
370 | struct mlx5_flow_destination *dest); |
371 | |
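/* A node starts with a single reference; once the refcount drops to zero
 * in tree_put_node(), the node is removed from HW (del_hw_func) and then
 * freed (del_sw_func).
 */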
372 | static void tree_init_node(struct fs_node *node, |
373 | void (*del_hw_func)(struct fs_node *), |
374 | void (*del_sw_func)(struct fs_node *)) |
375 | { |
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
379 | init_rwsem(&node->lock); |
380 | node->del_hw_func = del_hw_func; |
381 | node->del_sw_func = del_sw_func; |
382 | node->active = false; |
383 | } |
384 | |
385 | static void tree_add_node(struct fs_node *node, struct fs_node *parent) |
386 | { |
387 | if (parent) |
		refcount_inc(&parent->refcount);
389 | node->parent = parent; |
390 | |
391 | /* Parent is the root */ |
392 | if (!parent) |
393 | node->root = node; |
394 | else |
395 | node->root = parent->root; |
396 | } |
397 | |
398 | static int tree_get_node(struct fs_node *node) |
399 | { |
	return refcount_inc_not_zero(&node->refcount);
401 | } |
402 | |
403 | static void nested_down_read_ref_node(struct fs_node *node, |
404 | enum fs_i_lock_class class) |
405 | { |
406 | if (node) { |
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
409 | } |
410 | } |
411 | |
412 | static void nested_down_write_ref_node(struct fs_node *node, |
413 | enum fs_i_lock_class class) |
414 | { |
415 | if (node) { |
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
418 | } |
419 | } |
420 | |
421 | static void down_write_ref_node(struct fs_node *node, bool locked) |
422 | { |
423 | if (node) { |
424 | if (!locked) |
			down_write(&node->lock);
		refcount_inc(&node->refcount);
427 | } |
428 | } |
429 | |
430 | static void up_read_ref_node(struct fs_node *node) |
431 | { |
	refcount_dec(&node->refcount);
	up_read(&node->lock);
434 | } |
435 | |
436 | static void up_write_ref_node(struct fs_node *node, bool locked) |
437 | { |
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
441 | } |
442 | |
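/* Drop a reference on @node. On the last reference the node is removed
 * from HW, unlinked from its parent's children list and freed, and the
 * reference it held on its parent is dropped in turn.
 */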
443 | static void tree_put_node(struct fs_node *node, bool locked) |
444 | { |
445 | struct fs_node *parent_node = node->parent; |
446 | |
	if (refcount_dec_and_test(&node->refcount)) {
448 | if (node->del_hw_func) |
449 | node->del_hw_func(node); |
450 | if (parent_node) { |
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
453 | } |
454 | node->del_sw_func(node); |
455 | if (parent_node) |
			up_write_ref_node(parent_node, locked);
457 | node = NULL; |
458 | } |
459 | if (!node && parent_node) |
		tree_put_node(parent_node, locked);
461 | } |
462 | |
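/* Remove @node only if the caller holds the last reference; otherwise
 * just drop the caller's reference and return -EEXIST.
 */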
463 | static int tree_remove_node(struct fs_node *node, bool locked) |
464 | { |
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
467 | return -EEXIST; |
468 | } |
469 | tree_put_node(node, locked); |
470 | return 0; |
471 | } |
472 | |
473 | static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, |
474 | unsigned int prio) |
475 | { |
476 | struct fs_prio *iter_prio; |
477 | |
478 | fs_for_each_prio(iter_prio, ns) { |
479 | if (iter_prio->prio == prio) |
480 | return iter_prio; |
481 | } |
482 | |
483 | return NULL; |
484 | } |
485 | |
486 | static bool is_fwd_next_action(u32 action) |
487 | { |
488 | return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | |
489 | MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS); |
490 | } |
491 | |
492 | static bool is_fwd_dest_type(enum mlx5_flow_destination_type type) |
493 | { |
494 | return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM || |
495 | type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE || |
496 | type == MLX5_FLOW_DESTINATION_TYPE_UPLINK || |
497 | type == MLX5_FLOW_DESTINATION_TYPE_VPORT || |
498 | type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER || |
499 | type == MLX5_FLOW_DESTINATION_TYPE_TIR || |
500 | type == MLX5_FLOW_DESTINATION_TYPE_RANGE || |
501 | type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; |
502 | } |
503 | |
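/* A spec is valid only if every bit set in match_value is also set in
 * match_criteria, i.e. there are no value bits outside the match mask.
 */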
504 | static bool check_valid_spec(const struct mlx5_flow_spec *spec) |
505 | { |
506 | int i; |
507 | |
508 | for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++) |
509 | if (spec->match_value[i] & ~spec->match_criteria[i]) { |
510 | pr_warn("mlx5_core: match_value differs from match_criteria\n" ); |
511 | return false; |
512 | } |
513 | |
514 | return true; |
515 | } |
516 | |
517 | struct mlx5_flow_root_namespace *find_root(struct fs_node *node) |
518 | { |
519 | struct fs_node *root; |
520 | struct mlx5_flow_namespace *ns; |
521 | |
522 | root = node->root; |
523 | |
524 | if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) { |
525 | pr_warn("mlx5: flow steering node is not in tree or garbaged\n" ); |
526 | return NULL; |
527 | } |
528 | |
529 | ns = container_of(root, struct mlx5_flow_namespace, node); |
530 | return container_of(ns, struct mlx5_flow_root_namespace, ns); |
531 | } |
532 | |
533 | static inline struct mlx5_flow_steering *get_steering(struct fs_node *node) |
534 | { |
535 | struct mlx5_flow_root_namespace *root = find_root(node); |
536 | |
537 | if (root) |
538 | return root->dev->priv.steering; |
539 | return NULL; |
540 | } |
541 | |
542 | static inline struct mlx5_core_dev *get_dev(struct fs_node *node) |
543 | { |
544 | struct mlx5_flow_root_namespace *root = find_root(node); |
545 | |
546 | if (root) |
547 | return root->dev; |
548 | return NULL; |
549 | } |
550 | |
551 | static void del_sw_ns(struct fs_node *node) |
552 | { |
	kfree(node);
554 | } |
555 | |
556 | static void del_sw_prio(struct fs_node *node) |
557 | { |
	kfree(node);
559 | } |
560 | |
561 | static void del_hw_flow_table(struct fs_node *node) |
562 | { |
563 | struct mlx5_flow_root_namespace *root; |
564 | struct mlx5_flow_table *ft; |
565 | struct mlx5_core_dev *dev; |
566 | int err; |
567 | |
568 | fs_get_obj(ft, node); |
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
571 | trace_mlx5_fs_del_ft(ft); |
572 | |
573 | if (node->active) { |
574 | err = root->cmds->destroy_flow_table(root, ft); |
575 | if (err) |
576 | mlx5_core_warn(dev, "flow steering can't destroy ft\n" ); |
577 | } |
578 | } |
579 | |
580 | static void del_sw_flow_table(struct fs_node *node) |
581 | { |
582 | struct mlx5_flow_table *ft; |
583 | struct fs_prio *prio; |
584 | |
585 | fs_get_obj(ft, node); |
586 | |
	rhltable_destroy(&ft->fgs_hash);
588 | if (ft->node.parent) { |
589 | fs_get_obj(prio, ft->node.parent); |
590 | prio->num_ft--; |
591 | } |
	kfree(ft);
593 | } |
594 | |
595 | static void modify_fte(struct fs_fte *fte) |
596 | { |
597 | struct mlx5_flow_root_namespace *root; |
598 | struct mlx5_flow_table *ft; |
599 | struct mlx5_flow_group *fg; |
600 | struct mlx5_core_dev *dev; |
601 | int err; |
602 | |
603 | fs_get_obj(fg, fte->node.parent); |
604 | fs_get_obj(ft, fg->node.parent); |
	dev = get_dev(&fte->node);
606 | |
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
613 | fte->modify_mask = 0; |
614 | } |
615 | |
616 | static void del_sw_hw_rule(struct fs_node *node) |
617 | { |
618 | struct mlx5_flow_rule *rule; |
619 | struct fs_fte *fte; |
620 | |
621 | fs_get_obj(rule, node); |
622 | fs_get_obj(fte, rule->node.parent); |
623 | trace_mlx5_fs_del_rule(rule); |
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
628 | } |
629 | |
630 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) { |
631 | --fte->dests_size; |
632 | fte->modify_mask |= |
633 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | |
634 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); |
635 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; |
636 | goto out; |
637 | } |
638 | |
639 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) { |
640 | --fte->dests_size; |
641 | fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); |
642 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; |
643 | goto out; |
644 | } |
645 | |
	if (is_fwd_dest_type(rule->dest_attr.type)) {
647 | --fte->dests_size; |
648 | --fte->fwd_dests; |
649 | |
650 | if (!fte->fwd_dests) |
651 | fte->action.action &= |
652 | ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
653 | fte->modify_mask |= |
654 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); |
655 | goto out; |
656 | } |
657 | out: |
	kfree(rule);
659 | } |
660 | |
661 | static void del_hw_fte(struct fs_node *node) |
662 | { |
663 | struct mlx5_flow_root_namespace *root; |
664 | struct mlx5_flow_table *ft; |
665 | struct mlx5_flow_group *fg; |
666 | struct mlx5_core_dev *dev; |
667 | struct fs_fte *fte; |
668 | int err; |
669 | |
670 | fs_get_obj(fte, node); |
671 | fs_get_obj(fg, fte->node.parent); |
672 | fs_get_obj(ft, fg->node.parent); |
673 | |
674 | trace_mlx5_fs_del_fte(fte); |
675 | WARN_ON(fte->dests_size); |
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
678 | if (node->active) { |
679 | err = root->cmds->delete_fte(root, ft, fte); |
680 | if (err) |
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
684 | node->active = false; |
685 | } |
686 | } |
687 | |
688 | static void del_sw_fte(struct fs_node *node) |
689 | { |
690 | struct mlx5_flow_steering *steering = get_steering(node); |
691 | struct mlx5_flow_group *fg; |
692 | struct fs_fte *fte; |
693 | int err; |
694 | |
695 | fs_get_obj(fte, node); |
696 | fs_get_obj(fg, fte->node.parent); |
697 | |
	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
701 | WARN_ON(err); |
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
704 | } |
705 | |
706 | static void del_hw_flow_group(struct fs_node *node) |
707 | { |
708 | struct mlx5_flow_root_namespace *root; |
709 | struct mlx5_flow_group *fg; |
710 | struct mlx5_flow_table *ft; |
711 | struct mlx5_core_dev *dev; |
712 | |
713 | fs_get_obj(fg, node); |
714 | fs_get_obj(ft, fg->node.parent); |
	dev = get_dev(&ft->node);
716 | trace_mlx5_fs_del_fg(fg); |
717 | |
	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
722 | } |
723 | |
724 | static void del_sw_flow_group(struct fs_node *node) |
725 | { |
726 | struct mlx5_flow_steering *steering = get_steering(node); |
727 | struct mlx5_flow_group *fg; |
728 | struct mlx5_flow_table *ft; |
729 | int err; |
730 | |
731 | fs_get_obj(fg, node); |
732 | fs_get_obj(ft, fg->node.parent); |
733 | |
	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
736 | if (ft->autogroup.active && |
737 | fg->max_ftes == ft->autogroup.group_size && |
738 | fg->start_index < ft->autogroup.max_fte) |
739 | ft->autogroup.num_groups--; |
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
745 | } |
746 | |
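/* Allocate an index for @fte within @fg's range and link the FTE into
 * the group's hash table and children list.
 */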
747 | static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) |
748 | { |
749 | int index; |
750 | int ret; |
751 | |
	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
753 | if (index < 0) |
754 | return index; |
755 | |
756 | fte->index = index + fg->start_index; |
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
760 | if (ret) |
761 | goto err_ida_remove; |
762 | |
	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
765 | return 0; |
766 | |
767 | err_ida_remove: |
	ida_free(&fg->fte_allocator, index);
769 | return ret; |
770 | } |
771 | |
772 | static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, |
773 | const struct mlx5_flow_spec *spec, |
774 | struct mlx5_flow_act *flow_act) |
775 | { |
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
777 | struct fs_fte *fte; |
778 | |
	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);
782 | |
783 | memcpy(fte->val, &spec->match_value, sizeof(fte->val)); |
784 | fte->node.type = FS_TYPE_FLOW_ENTRY; |
785 | fte->action = *flow_act; |
786 | fte->flow_context = spec->flow_context; |
787 | |
	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
789 | |
790 | return fte; |
791 | } |
792 | |
793 | static void dealloc_flow_group(struct mlx5_flow_steering *steering, |
794 | struct mlx5_flow_group *fg) |
795 | { |
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
798 | } |
799 | |
800 | static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering, |
801 | u8 match_criteria_enable, |
802 | const void *match_criteria, |
803 | int start_index, |
804 | int end_index) |
805 | { |
806 | struct mlx5_flow_group *fg; |
807 | int ret; |
808 | |
	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
820 | fg->mask.match_criteria_enable = match_criteria_enable; |
821 | memcpy(&fg->mask.match_criteria, match_criteria, |
822 | sizeof(fg->mask.match_criteria)); |
823 | fg->node.type = FS_TYPE_FLOW_GROUP; |
824 | fg->start_index = start_index; |
825 | fg->max_ftes = end_index - start_index + 1; |
826 | |
827 | return fg; |
828 | } |
829 | |
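/* Allocate a flow group and link it into @ft's group hash table and,
 * right after @prev, into the table's sorted group list.
 */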
830 | static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft, |
831 | u8 match_criteria_enable, |
832 | const void *match_criteria, |
833 | int start_index, |
834 | int end_index, |
835 | struct list_head *prev) |
836 | { |
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
838 | struct mlx5_flow_group *fg; |
839 | int ret; |
840 | |
841 | fg = alloc_flow_group(steering, match_criteria_enable, match_criteria, |
842 | start_index, end_index); |
	if (IS_ERR(fg))
844 | return fg; |
845 | |
846 | /* initialize refcnt, add to parent list */ |
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);
860 | |
861 | return fg; |
862 | } |
863 | |
864 | static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, |
865 | enum fs_flow_table_type table_type, |
866 | enum fs_flow_table_op_mod op_mod, |
867 | u32 flags) |
868 | { |
869 | struct mlx5_flow_table *ft; |
870 | int ret; |
871 | |
	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
880 | } |
881 | |
882 | ft->level = level; |
883 | ft->node.type = FS_TYPE_FLOW_TABLE; |
884 | ft->op_mod = op_mod; |
885 | ft->type = table_type; |
886 | ft->vport = vport; |
887 | ft->flags = flags; |
	INIT_LIST_HEAD(&ft->fwd_rules);
889 | mutex_init(&ft->lock); |
890 | |
891 | return ft; |
892 | } |
893 | |
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from the right); otherwise we search
 * for the last flow table in the root sub-tree up to start (closest from
 * the left).
 */
898 | static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, |
899 | struct list_head *start, |
900 | bool reverse) |
901 | { |
902 | #define list_advance_entry(pos, reverse) \ |
903 | ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list)) |
904 | |
905 | #define list_for_each_advance_continue(pos, head, reverse) \ |
906 | for (pos = list_advance_entry(pos, reverse); \ |
907 | &pos->list != (head); \ |
908 | pos = list_advance_entry(pos, reverse)) |
909 | |
910 | struct fs_node *iter = list_entry(start, struct fs_node, list); |
911 | struct mlx5_flow_table *ft = NULL; |
912 | |
913 | if (!root) |
914 | return NULL; |
915 | |
916 | list_for_each_advance_continue(iter, &root->children, reverse) { |
917 | if (iter->type == FS_TYPE_FLOW_TABLE) { |
918 | fs_get_obj(ft, iter); |
919 | return ft; |
920 | } |
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
922 | if (ft) |
923 | return ft; |
924 | } |
925 | |
926 | return ft; |
927 | } |
928 | |
929 | static struct fs_node *find_prio_chains_parent(struct fs_node *parent, |
930 | struct fs_node **child) |
931 | { |
932 | struct fs_node *node = NULL; |
933 | |
934 | while (parent && parent->type != FS_TYPE_PRIO_CHAINS) { |
935 | node = parent; |
936 | parent = parent->parent; |
937 | } |
938 | |
939 | if (child) |
940 | *child = node; |
941 | |
942 | return parent; |
943 | } |
944 | |
945 | /* If reverse is false then return the first flow table next to the passed node |
946 | * in the tree, else return the last flow table before the node in the tree. |
947 | * If skip is true, skip the flow tables in the same prio_chains prio. |
948 | */ |
949 | static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse, |
950 | bool skip) |
951 | { |
952 | struct fs_node *prio_chains_parent = NULL; |
953 | struct mlx5_flow_table *ft = NULL; |
954 | struct fs_node *curr_node; |
955 | struct fs_node *parent; |
956 | |
957 | if (skip) |
		prio_chains_parent = find_prio_chains_parent(node, NULL);
959 | parent = node->parent; |
960 | curr_node = node; |
961 | while (!ft && parent) { |
962 | if (parent != prio_chains_parent) |
			ft = find_closest_ft_recursive(parent, &curr_node->list,
						       reverse);
965 | curr_node = parent; |
966 | parent = curr_node->parent; |
967 | } |
968 | return ft; |
969 | } |
970 | |
971 | /* Assuming all the tree is locked by mutex chain lock */ |
972 | static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node) |
973 | { |
	return find_closest_ft(node, false, true);
975 | } |
976 | |
977 | /* Assuming all the tree is locked by mutex chain lock */ |
978 | static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node) |
979 | { |
	return find_closest_ft(node, true, true);
981 | } |
982 | |
983 | static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, |
984 | struct mlx5_flow_act *flow_act) |
985 | { |
986 | struct fs_prio *prio; |
987 | bool next_ns; |
988 | |
989 | next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; |
990 | fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent); |
991 | |
	return find_next_chained_ft(&prio->node);
993 | } |
994 | |
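/* Re-point every flow table in @prio at @ft, the new next table in the
 * chain.
 */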
995 | static int connect_fts_in_prio(struct mlx5_core_dev *dev, |
996 | struct fs_prio *prio, |
997 | struct mlx5_flow_table *ft) |
998 | { |
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
1000 | struct mlx5_flow_table *iter; |
1001 | int err; |
1002 | |
1003 | fs_for_each_ft(iter, prio) { |
1004 | err = root->cmds->modify_flow_table(root, iter, ft); |
1005 | if (err) { |
1006 | mlx5_core_err(dev, |
1007 | "Failed to modify flow table id %d, type %d, err %d\n" , |
1008 | iter->id, iter->type, err); |
1009 | /* The driver is out of sync with the FW */ |
1010 | return err; |
1011 | } |
1012 | } |
1013 | return 0; |
1014 | } |
1015 | |
static struct mlx5_flow_table *find_closest_ft_prio_chains(struct fs_node *node,
1017 | struct fs_node *parent, |
1018 | struct fs_node **child, |
1019 | bool reverse) |
1020 | { |
1021 | struct mlx5_flow_table *ft; |
1022 | |
	ft = find_closest_ft(node, reverse, false);
1024 | |
	if (ft && parent == find_prio_chains_parent(&ft->node, child))
1026 | return ft; |
1027 | |
1028 | return NULL; |
1029 | } |
1030 | |
1031 | /* Connect flow tables from previous priority of prio to ft */ |
1032 | static int connect_prev_fts(struct mlx5_core_dev *dev, |
1033 | struct mlx5_flow_table *ft, |
1034 | struct fs_prio *prio) |
1035 | { |
1036 | struct fs_node *prio_parent, *parent = NULL, *child, *node; |
1037 | struct mlx5_flow_table *prev_ft; |
1038 | int err = 0; |
1039 | |
	prio_parent = find_prio_chains_parent(&prio->node, &child);
1041 | |
1042 | /* return directly if not under the first sub ns of prio_chains prio */ |
	if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
1044 | return 0; |
1045 | |
	prev_ft = find_prev_chained_ft(&prio->node);
1047 | while (prev_ft) { |
1048 | struct fs_prio *prev_prio; |
1049 | |
1050 | fs_get_obj(prev_prio, prev_ft->node.parent); |
		err = connect_fts_in_prio(dev, prev_prio, ft);
1052 | if (err) |
1053 | break; |
1054 | |
1055 | if (!parent) { |
			parent = find_prio_chains_parent(&prev_prio->node, &child);
1057 | if (!parent) |
1058 | break; |
1059 | } |
1060 | |
1061 | node = child; |
		prev_ft = find_closest_ft_prio_chains(node, parent, &child, true);
1063 | } |
1064 | return err; |
1065 | } |
1066 | |
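/* If @ft now sits at a lower level than the current root flow table,
 * make it the new root, updating the root once per underlay QPN (or
 * once with QPN 0 when the underlay list is empty).
 */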
static int update_root_ft_create(struct mlx5_flow_table *ft,
				 struct fs_prio *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
1071 | struct mlx5_ft_underlay_qp *uqp; |
1072 | int min_level = INT_MAX; |
1073 | int err = 0; |
1074 | u32 qpn; |
1075 | |
1076 | if (root->root_ft) |
1077 | min_level = root->root_ft->level; |
1078 | |
1079 | if (ft->level >= min_level) |
1080 | return 0; |
1081 | |
	if (list_empty(&root->underlay_qpns)) {
1083 | /* Don't set any QPN (zero) in case QPN list is empty */ |
1084 | qpn = 0; |
1085 | err = root->cmds->update_root_ft(root, ft, qpn, false); |
1086 | } else { |
1087 | list_for_each_entry(uqp, &root->underlay_qpns, list) { |
1088 | qpn = uqp->qpn; |
1089 | err = root->cmds->update_root_ft(root, ft, |
1090 | qpn, false); |
1091 | if (err) |
1092 | break; |
1093 | } |
1094 | } |
1095 | |
1096 | if (err) |
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
1100 | else |
1101 | root->root_ft = ft; |
1102 | |
1103 | return err; |
1104 | } |
1105 | |
1106 | static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, |
1107 | struct mlx5_flow_destination *dest) |
1108 | { |
1109 | struct mlx5_flow_root_namespace *root; |
1110 | struct mlx5_flow_table *ft; |
1111 | struct mlx5_flow_group *fg; |
1112 | struct fs_fte *fte; |
1113 | int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); |
1114 | int err = 0; |
1115 | |
1116 | fs_get_obj(fte, rule->node.parent); |
1117 | if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) |
1118 | return -EINVAL; |
	down_write_ref_node(&fte->node, false);
1120 | fs_get_obj(fg, fte->node.parent); |
1121 | fs_get_obj(ft, fg->node.parent); |
1122 | |
1123 | memcpy(&rule->dest_attr, dest, sizeof(*dest)); |
	root = find_root(&ft->node);
1125 | err = root->cmds->update_fte(root, ft, fg, |
1126 | modify_mask, fte); |
	up_write_ref_node(&fte->node, false);
1128 | |
1129 | return err; |
1130 | } |
1131 | |
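/* Replace @old_dest with @new_dest on the matching rule of @handle.
 * When @old_dest is NULL the handle must contain exactly one rule,
 * which is then re-pointed at @new_dest.
 */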
1132 | int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle, |
1133 | struct mlx5_flow_destination *new_dest, |
1134 | struct mlx5_flow_destination *old_dest) |
1135 | { |
1136 | int i; |
1137 | |
1138 | if (!old_dest) { |
1139 | if (handle->num_rules != 1) |
1140 | return -EINVAL; |
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
1143 | } |
1144 | |
1145 | for (i = 0; i < handle->num_rules; i++) { |
		if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
1149 | } |
1150 | |
1151 | return -EINVAL; |
1152 | } |
1153 | |
1154 | /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */ |
1155 | static int connect_fwd_rules(struct mlx5_core_dev *dev, |
1156 | struct mlx5_flow_table *new_next_ft, |
1157 | struct mlx5_flow_table *old_next_ft) |
1158 | { |
1159 | struct mlx5_flow_destination dest = {}; |
1160 | struct mlx5_flow_rule *iter; |
1161 | int err = 0; |
1162 | |
1163 | /* new_next_ft and old_next_ft could be NULL only |
1164 | * when we create/destroy the anchor flow table. |
1165 | */ |
1166 | if (!new_next_ft || !old_next_ft) |
1167 | return 0; |
1168 | |
1169 | dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
1170 | dest.ft = new_next_ft; |
1171 | |
1172 | mutex_lock(&old_next_ft->lock); |
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
1175 | list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) { |
1176 | if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) && |
1177 | iter->ft->ns == new_next_ft->ns) |
1178 | continue; |
1179 | |
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
1184 | } |
1185 | return 0; |
1186 | } |
1187 | |
1188 | static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, |
1189 | struct fs_prio *prio) |
1190 | { |
1191 | struct mlx5_flow_table *next_ft, *first_ft; |
1192 | int err = 0; |
1193 | |
1194 | /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ |
1195 | |
1196 | first_ft = list_first_entry_or_null(&prio->node.children, |
1197 | struct mlx5_flow_table, node.list); |
1198 | if (!first_ft || first_ft->level > ft->level) { |
1199 | err = connect_prev_fts(dev, ft, prio); |
1200 | if (err) |
1201 | return err; |
1202 | |
		next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
		err = connect_fwd_rules(dev, ft, next_ft);
1205 | if (err) |
1206 | return err; |
1207 | } |
1208 | |
1209 | if (MLX5_CAP_FLOWTABLE(dev, |
1210 | flow_table_properties_nic_receive.modify_root)) |
1211 | err = update_root_ft_create(ft, prio); |
1212 | return err; |
1213 | } |
1214 | |
1215 | static void list_add_flow_table(struct mlx5_flow_table *ft, |
1216 | struct fs_prio *prio) |
1217 | { |
1218 | struct list_head *prev = &prio->node.children; |
1219 | struct mlx5_flow_table *iter; |
1220 | |
1221 | fs_for_each_ft(iter, prio) { |
1222 | if (iter->level > ft->level) |
1223 | break; |
1224 | prev = &iter->node.list; |
1225 | } |
	list_add(&ft->node.list, prev);
1227 | } |
1228 | |
1229 | static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, |
1230 | struct mlx5_flow_table_attr *ft_attr, |
1231 | enum fs_flow_table_op_mod op_mod, |
1232 | u16 vport) |
1233 | { |
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1235 | bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED; |
1236 | struct mlx5_flow_table *next_ft; |
1237 | struct fs_prio *fs_prio = NULL; |
1238 | struct mlx5_flow_table *ft; |
1239 | int err; |
1240 | |
1241 | if (!root) { |
1242 | pr_err("mlx5: flow steering failed to find root of namespace\n" ); |
1243 | return ERR_PTR(error: -ENODEV); |
1244 | } |
1245 | |
1246 | mutex_lock(&root->chain_lock); |
	fs_prio = find_prio(ns, ft_attr->prio);
1248 | if (!fs_prio) { |
1249 | err = -EINVAL; |
1250 | goto unlock_root; |
1251 | } |
1252 | if (!unmanaged) { |
1253 | /* The level is related to the |
1254 | * priority level range. |
1255 | */ |
1256 | if (ft_attr->level >= fs_prio->num_levels) { |
1257 | err = -ENOSPC; |
1258 | goto unlock_root; |
1259 | } |
1260 | |
1261 | ft_attr->level += fs_prio->start_level; |
1262 | } |
1263 | |
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
1273 | goto unlock_root; |
1274 | } |
1275 | |
	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
		find_next_chained_ft(&fs_prio->node);
1279 | ft->def_miss_action = ns->def_miss_action; |
1280 | ft->ns = ns; |
1281 | err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft); |
1282 | if (err) |
1283 | goto free_ft; |
1284 | |
1285 | if (!unmanaged) { |
		err = connect_flow_table(root->dev, ft, fs_prio);
1287 | if (err) |
1288 | goto destroy_ft; |
1289 | } |
1290 | |
1291 | ft->node.active = true; |
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
1296 | } else { |
1297 | ft->node.root = fs_prio->node.root; |
1298 | } |
1299 | fs_prio->num_ft++; |
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
1302 | trace_mlx5_fs_add_ft(ft); |
1303 | return ft; |
1304 | destroy_ft: |
1305 | root->cmds->destroy_flow_table(root, ft); |
1306 | free_ft: |
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
1312 | } |
1313 | |
1314 | struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, |
1315 | struct mlx5_flow_table_attr *ft_attr) |
1316 | { |
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1318 | } |
1319 | EXPORT_SYMBOL(mlx5_create_flow_table); |
1320 | |
1321 | u32 mlx5_flow_table_id(struct mlx5_flow_table *ft) |
1322 | { |
1323 | return ft->id; |
1324 | } |
1325 | EXPORT_SYMBOL(mlx5_flow_table_id); |
1326 | |
1327 | struct mlx5_flow_table * |
1328 | mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, |
1329 | struct mlx5_flow_table_attr *ft_attr, u16 vport) |
1330 | { |
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1332 | } |
1333 | |
1334 | struct mlx5_flow_table* |
1335 | mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns, |
1336 | int prio, u32 level) |
1337 | { |
1338 | struct mlx5_flow_table_attr ft_attr = {}; |
1339 | |
1340 | ft_attr.level = level; |
1341 | ft_attr.prio = prio; |
1342 | ft_attr.max_fte = 1; |
1343 | |
1344 | return __mlx5_create_flow_table(ns, ft_attr: &ft_attr, op_mod: FS_FT_OP_MOD_LAG_DEMUX, vport: 0); |
1345 | } |
1346 | EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); |
1347 | |
1348 | #define MAX_FLOW_GROUP_SIZE BIT(24) |
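/* Create a flow table whose flow groups are managed by the core, sized
 * from the autogroup attributes. A minimal usage sketch (the field
 * values below are illustrative only):
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 1024;
 *	ft_attr.autogroup.max_num_groups = 4;
 *	ft_attr.autogroup.num_reserved_entries = 2;
 *	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 */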
1349 | struct mlx5_flow_table* |
1350 | mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, |
1351 | struct mlx5_flow_table_attr *ft_attr) |
1352 | { |
1353 | int num_reserved_entries = ft_attr->autogroup.num_reserved_entries; |
1354 | int max_num_groups = ft_attr->autogroup.max_num_groups; |
1355 | struct mlx5_flow_table *ft; |
1356 | int autogroups_max_fte; |
1357 | |
1358 | ft = mlx5_create_flow_table(ns, ft_attr); |
	if (IS_ERR(ft))
1360 | return ft; |
1361 | |
1362 | autogroups_max_fte = ft->max_fte - num_reserved_entries; |
1363 | if (max_num_groups > autogroups_max_fte) |
1364 | goto err_validate; |
1365 | if (num_reserved_entries > ft->max_fte) |
1366 | goto err_validate; |
1367 | |
1368 | /* Align the number of groups according to the largest group size */ |
1369 | if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE) |
1370 | max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1; |
1371 | |
1372 | ft->autogroup.active = true; |
1373 | ft->autogroup.required_groups = max_num_groups; |
1374 | ft->autogroup.max_fte = autogroups_max_fte; |
	/* Reserve space for flow groups in addition to the max types */
1376 | ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1); |
1377 | |
1378 | return ft; |
1379 | |
1380 | err_validate: |
1381 | mlx5_destroy_flow_table(ft); |
	return ERR_PTR(-ENOSPC);
1383 | } |
1384 | EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); |
1385 | |
1386 | struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, |
1387 | u32 *fg_in) |
1388 | { |
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1390 | void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, |
1391 | fg_in, match_criteria); |
1392 | u8 match_criteria_enable = MLX5_GET(create_flow_group_in, |
1393 | fg_in, |
1394 | match_criteria_enable); |
1395 | int start_index = MLX5_GET(create_flow_group_in, fg_in, |
1396 | start_flow_index); |
1397 | int end_index = MLX5_GET(create_flow_group_in, fg_in, |
1398 | end_flow_index); |
1399 | struct mlx5_flow_group *fg; |
1400 | int err; |
1401 | |
1402 | if (ft->autogroup.active && start_index < ft->autogroup.max_fte) |
		return ERR_PTR(-EPERM);
1404 | |
	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;
1412 | |
1413 | err = root->cmds->create_flow_group(root, ft, fg_in, fg); |
1414 | if (err) { |
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
1417 | } |
1418 | trace_mlx5_fs_add_fg(fg); |
1419 | fg->node.active = true; |
1420 | |
1421 | return fg; |
1422 | } |
1423 | EXPORT_SYMBOL(mlx5_create_flow_group); |
1424 | |
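/* Allocate a rule node for @dest; a NULL @dest yields a rule whose
 * destination type is NONE.
 */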
1425 | static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) |
1426 | { |
1427 | struct mlx5_flow_rule *rule; |
1428 | |
	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1430 | if (!rule) |
1431 | return NULL; |
1432 | |
	INIT_LIST_HEAD(&rule->next_ft);
1434 | rule->node.type = FS_TYPE_FLOW_DEST; |
1435 | if (dest) |
1436 | memcpy(&rule->dest_attr, dest, sizeof(*dest)); |
1437 | else |
1438 | rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE; |
1439 | |
1440 | return rule; |
1441 | } |
1442 | |
1443 | static struct mlx5_flow_handle *alloc_handle(int num_rules) |
1444 | { |
1445 | struct mlx5_flow_handle *handle; |
1446 | |
1447 | handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL); |
1448 | if (!handle) |
1449 | return NULL; |
1450 | |
1451 | handle->num_rules = num_rules; |
1452 | |
1453 | return handle; |
1454 | } |
1455 | |
1456 | static void destroy_flow_handle(struct fs_fte *fte, |
1457 | struct mlx5_flow_handle *handle, |
1458 | struct mlx5_flow_destination *dest, |
1459 | int i) |
1460 | { |
1461 | for (; --i >= 0;) { |
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
1466 | } |
1467 | } |
	kfree(handle);
1469 | } |
1470 | |
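/* Build a handle with one rule per destination. An existing rule for
 * the same destination is reused (its refcount is taken) instead of
 * being duplicated; *modify_mask accumulates which FTE fields need a
 * FW update.
 */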
1471 | static struct mlx5_flow_handle * |
1472 | create_flow_handle(struct fs_fte *fte, |
1473 | struct mlx5_flow_destination *dest, |
1474 | int dest_num, |
1475 | int *modify_mask, |
1476 | bool *new_rule) |
1477 | { |
1478 | struct mlx5_flow_handle *handle; |
1479 | struct mlx5_flow_rule *rule = NULL; |
1480 | static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); |
1481 | static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); |
1482 | int type; |
1483 | int i = 0; |
1484 | |
	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);
1488 | |
1489 | do { |
1490 | if (dest) { |
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
1494 | goto rule_found; |
1495 | } |
1496 | } |
1497 | |
1498 | *new_rule = true; |
		rule = alloc_rule(dest + i);
1500 | if (!rule) |
1501 | goto free_rules; |
1502 | |
		/* Add dest to the dests list; flow tables must be at the
		 * end of the list for forward-to-next-prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
1512 | if (dest) { |
1513 | fte->dests_size++; |
1514 | |
			if (is_fwd_dest_type(dest[i].type))
1516 | fte->fwd_dests++; |
1517 | |
1518 | type = dest[i].type == |
1519 | MLX5_FLOW_DESTINATION_TYPE_COUNTER; |
1520 | *modify_mask |= type ? count : dst; |
1521 | } |
1522 | rule_found: |
1523 | handle->rule[i] = rule; |
1524 | } while (++i < dest_num); |
1525 | |
1526 | return handle; |
1527 | |
1528 | free_rules: |
1529 | destroy_flow_handle(fte, handle, dest, i); |
	return ERR_PTR(-ENOMEM);
1531 | } |
1532 | |
1533 | /* fte should not be deleted while calling this function */ |
1534 | static struct mlx5_flow_handle * |
1535 | add_rule_fte(struct fs_fte *fte, |
1536 | struct mlx5_flow_group *fg, |
1537 | struct mlx5_flow_destination *dest, |
1538 | int dest_num, |
1539 | bool update_action) |
1540 | { |
1541 | struct mlx5_flow_root_namespace *root; |
1542 | struct mlx5_flow_handle *handle; |
1543 | struct mlx5_flow_table *ft; |
1544 | int modify_mask = 0; |
1545 | int err; |
1546 | bool new_rule = false; |
1547 | |
	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
1551 | goto out; |
1552 | |
1553 | if (update_action) |
1554 | modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); |
1555 | |
1556 | fs_get_obj(ft, fg->node.parent); |
	root = find_root(&fg->node);
1558 | if (!(fte->status & FS_FTE_STATUS_EXISTING)) |
1559 | err = root->cmds->create_fte(root, ft, fg, fte); |
1560 | else |
1561 | err = root->cmds->update_fte(root, ft, fg, modify_mask, fte); |
1562 | if (err) |
1563 | goto free_handle; |
1564 | |
1565 | fte->node.active = true; |
1566 | fte->status |= FS_FTE_STATUS_EXISTING; |
	atomic_inc(&fg->node.version);
1568 | |
1569 | out: |
1570 | return handle; |
1571 | |
1572 | free_handle: |
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
1575 | } |
1576 | |
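/* Find a free index range in the autogrouped table (groups are kept
 * sorted by start_index) and allocate a flow group there for @spec's
 * match criteria.
 */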
1577 | static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft, |
1578 | const struct mlx5_flow_spec *spec) |
1579 | { |
1580 | struct list_head *prev = &ft->node.children; |
1581 | u32 max_fte = ft->autogroup.max_fte; |
1582 | unsigned int candidate_index = 0; |
1583 | unsigned int group_size = 0; |
1584 | struct mlx5_flow_group *fg; |
1585 | |
1586 | if (!ft->autogroup.active) |
		return ERR_PTR(-ENOENT);
1588 | |
1589 | if (ft->autogroup.num_groups < ft->autogroup.required_groups) |
1590 | group_size = ft->autogroup.group_size; |
1591 | |
1592 | /* max_fte == ft->autogroup.max_types */ |
1593 | if (group_size == 0) |
1594 | group_size = 1; |
1595 | |
1596 | /* sorted by start_index */ |
1597 | fs_for_each_fg(fg, ft) { |
1598 | if (candidate_index + group_size > fg->start_index) |
1599 | candidate_index = fg->start_index + fg->max_ftes; |
1600 | else |
1601 | break; |
1602 | prev = &fg->node.list; |
1603 | } |
1604 | |
1605 | if (candidate_index + group_size > max_fte) |
		return ERR_PTR(-ENOSPC);
1607 | |
	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
1615 | goto out; |
1616 | |
1617 | if (group_size == ft->autogroup.group_size) |
1618 | ft->autogroup.num_groups++; |
1619 | |
1620 | out: |
1621 | return fg; |
1622 | } |
1623 | |
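/* Issue the FW command creating @fg from the group's cached match
 * criteria and index range.
 */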
1624 | static int create_auto_flow_group(struct mlx5_flow_table *ft, |
1625 | struct mlx5_flow_group *fg) |
1626 | { |
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1628 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
1629 | void *match_criteria_addr; |
1630 | u8 src_esw_owner_mask_on; |
1631 | void *misc; |
1632 | int err; |
1633 | u32 *in; |
1634 | |
	in = kvzalloc(inlen, GFP_KERNEL);
1636 | if (!in) |
1637 | return -ENOMEM; |
1638 | |
1639 | MLX5_SET(create_flow_group_in, in, match_criteria_enable, |
1640 | fg->mask.match_criteria_enable); |
1641 | MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index); |
1642 | MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index + |
1643 | fg->max_ftes - 1); |
1644 | |
1645 | misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria, |
1646 | misc_parameters); |
1647 | src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc, |
1648 | source_eswitch_owner_vhca_id); |
1649 | MLX5_SET(create_flow_group_in, in, |
1650 | source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on); |
1651 | |
1652 | match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, |
1653 | in, match_criteria); |
1654 | memcpy(match_criteria_addr, fg->mask.match_criteria, |
1655 | sizeof(fg->mask.match_criteria)); |
1656 | |
1657 | err = root->cmds->create_flow_group(root, ft, in, fg); |
1658 | if (!err) { |
1659 | fg->node.active = true; |
1660 | trace_mlx5_fs_add_fg(fg); |
1661 | } |
1662 | |
1663 | kvfree(addr: in); |
1664 | return err; |
1665 | } |
1666 | |
1667 | static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1, |
1668 | struct mlx5_pkt_reformat *p2) |
1669 | { |
1670 | return p1->owner == p2->owner && |
1671 | (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ? |
1672 | p1->id == p2->id : |
1673 | mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat: p1) == |
1674 | mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat: p2)); |
1675 | } |
1676 | |
1677 | static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, |
1678 | struct mlx5_flow_destination *d2) |
1679 | { |
1680 | if (d1->type == d2->type) { |
1681 | if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT || |
1682 | d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && |
1683 | d1->vport.num == d2->vport.num && |
1684 | d1->vport.flags == d2->vport.flags && |
1685 | ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? |
1686 | (d1->vport.vhca_id == d2->vport.vhca_id) : true) && |
1687 | ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ? |
1688 | mlx5_pkt_reformat_cmp(p1: d1->vport.pkt_reformat, |
1689 | p2: d2->vport.pkt_reformat) : true)) || |
1690 | (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && |
1691 | d1->ft == d2->ft) || |
1692 | (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && |
1693 | d1->tir_num == d2->tir_num) || |
1694 | (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM && |
1695 | d1->ft_num == d2->ft_num) || |
1696 | (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER && |
1697 | d1->sampler_id == d2->sampler_id) || |
1698 | (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE && |
1699 | d1->range.field == d2->range.field && |
1700 | d1->range.hit_ft == d2->range.hit_ft && |
1701 | d1->range.miss_ft == d2->range.miss_ft && |
1702 | d1->range.min == d2->range.min && |
1703 | d1->range.max == d2->range.max)) |
1704 | return true; |
1705 | } |
1706 | |
1707 | return false; |
1708 | } |
1709 | |
1710 | static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, |
1711 | struct mlx5_flow_destination *dest) |
1712 | { |
1713 | struct mlx5_flow_rule *rule; |
1714 | |
1715 | list_for_each_entry(rule, &fte->node.children, node.list) { |
1716 | if (mlx5_flow_dests_cmp(d1: &rule->dest_attr, d2: dest)) |
1717 | return rule; |
1718 | } |
1719 | return NULL; |
1720 | } |
1721 | |
1722 | static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, |
1723 | const struct mlx5_fs_vlan *vlan1) |
1724 | { |
1725 | return vlan0->ethtype != vlan1->ethtype || |
1726 | vlan0->vid != vlan1->vid || |
1727 | vlan0->prio != vlan1->prio; |
1728 | } |
1729 | |
1730 | static bool check_conflicting_actions(const struct mlx5_flow_act *act1, |
1731 | const struct mlx5_flow_act *act2) |
1732 | { |
1733 | u32 action1 = act1->action; |
1734 | u32 action2 = act2->action; |
1735 | u32 xored_actions; |
1736 | |
1737 | xored_actions = action1 ^ action2; |
1738 | |
1739 | /* if one rule only wants to count, it's ok */ |
1740 | if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || |
1741 | action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT) |
1742 | return false; |
1743 | |
1744 | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | |
1745 | MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | |
1746 | MLX5_FLOW_CONTEXT_ACTION_DECAP | |
1747 | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | |
1748 | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | |
1749 | MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | |
1750 | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 | |
1751 | MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) |
1752 | return true; |
1753 | |
1754 | if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && |
1755 | act1->pkt_reformat != act2->pkt_reformat) |
1756 | return true; |
1757 | |
1758 | if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && |
1759 | act1->modify_hdr != act2->modify_hdr) |
1760 | return true; |
1761 | |
1762 | if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && |
1763 | check_conflicting_actions_vlan(vlan0: &act1->vlan[0], vlan1: &act2->vlan[0])) |
1764 | return true; |
1765 | |
1766 | if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && |
1767 | check_conflicting_actions_vlan(vlan0: &act1->vlan[1], vlan1: &act2->vlan[1])) |
1768 | return true; |
1769 | |
1770 | return false; |
1771 | } |
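
/*
 * Examples of how the checks above combine (illustrative): a new rule
 * that only wants MLX5_FLOW_CONTEXT_ACTION_COUNT never conflicts, so it
 * can piggy-back on any existing FTE. DROP vs. FWD_DEST differ in the
 * DROP bit of the XOR and therefore conflict. Two MOD_HDR rules conflict
 * only when they reference different modify-header objects.
 */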

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act, &fte->action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	/* Link newly added rules into the tree. */
	for (i = 0; i < handle->num_rules; i++) {
		if (!handle->rule[i]->node.parent) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX &&
		    ft->type != FS_FT_NIC_TX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    struct mlx5_flow_group *fg,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (fg && fg != g)
			continue;

		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			rcu_read_unlock();
			free_match_list(match_head, ft_locked);
			return -ENOMEM;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
	rcu_read_unlock();
	return 0;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}
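
/*
 * The summed versions act as a cheap optimistic-concurrency token: every
 * FTE insertion bumps its group's version (see add_rule_fte()), so if the
 * sum taken before an unlocked search differs from the sum taken after,
 * some matched group changed underneath us and the search is retried
 * under the write lock. Since node versions only ever increase, the sum
 * is monotonic and cannot mask a change.
 */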

static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version = 0;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with an identical match value and attempt to
	 * update its action.
	 */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		/* No error check needed here, because insert_fte() is not called */
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with a matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for the case that a new flow group
	 * was added while the fgs weren't locked.
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version. If the version has changed, it could be
	 * that an FTE with the same match value was added while the fgs
	 * weren't locked.
	 */
	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
	    version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		if (!g->node.active) {
			up_write_ref_node(&g->node, false);
			continue;
		}

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		if (IS_ERR(rule))
			tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	if (flow_act->fg && ft->autogroup.active)
		return ERR_PTR(-EINVAL);

	if (dest && dest_num <= 0)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which have a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	int i;

	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
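
/*
 * Minimal usage sketch (illustrative only; "my_tir_num" is a placeholder
 * a real caller would obtain from its own RSS/TIR setup):
 *
 *	struct mlx5_flow_destination dst = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = my_tir_num,
 *	};
 *	struct mlx5_flow_act act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	... fill spec->match_criteria and spec->match_value ...
 *	handle = mlx5_add_flow_rules(ft, spec, &act, &dst, 1);
 *	kvfree(spec);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... and mlx5_del_flow_rules(handle) on teardown.
 */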

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes, and increase its refcount, in order not to trigger the
	 * FTE's "del" functions; we handle them here instead.
	 * The removal of the rules is done under the locked FTE.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, it means the FTE should be deleted. First delete the
	 * FTE in FW. Then, unlock the FTE, and proceed to tree_put_node of
	 * the FTE, which will handle the last decrease of the refcount, as
	 * well as required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (list_empty(&fte->node.children)) {
		fte->node.del_hw_func(&fte->node);
		/* Avoid double call to del_hw_fte */
		fte->node.del_hw_func = NULL;
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
	} else if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		up_write_ref_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_node *prio_parent, *child;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);

	prio_parent = find_prio_chains_parent(&prio->node, &child);

	if (prio_parent && list_is_first(&child->list, &prio_parent->children))
		return find_closest_ft(&prio->node, false, false);

	return find_next_chained_ft(&prio->node);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		return true;
	default:
		return false;
	}
}

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		if (steering->port_sel_root_ns)
			return &steering->port_sel_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		root_ns = steering->fdb_root_ns;
		prio = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		root_ns = steering->egress_root_ns;
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		root_ns = steering->rdma_tx_root_ns;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_IPSEC_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_IPSEC_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_MACSEC_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_MACSEC_PRIO;
		break;
	default: /* Must be NIC RX */
		WARN_ON(!is_nic_rx_ns(type));
		root_ns = steering->root_ns;
		prio = type;
		break;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (vport >= steering->esw_egress_acl_vports)
			return NULL;
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (vport >= steering->esw_ingress_acl_vports)
			return NULL;
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
	offset / 32)) >> \
	(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
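
/*
 * Worked example of the bit extraction above (illustrative): for a
 * capability at bit offset 70, "offset / 32" selects the third
 * big-endian dword (bits 64..95) and "offset & 0x1f" is 6; after
 * be32_to_cpu() that bit sits 32 - FLOW_TABLE_BIT_SZ - 6 = 25 positions
 * above bit 0, so shifting right by 25 and masking with
 * FLOW_TABLE_BIT_SZ leaves just the single capability bit.
 */
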
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int err;
	int i;

	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       fs_parent_node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, we can jump from one chain
		 * (namespace) to another, so we accumulate the levels.
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
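
/*
 * Worked example (illustrative): a plain FS_TYPE_PRIO starting at level 4
 * with two child namespaces of 3 levels each hands both the same base of
 * 4, because their tables can never chain to one another, and ends up
 * with num_levels = 3. A FS_TYPE_PRIO_CHAINS prio with the same children
 * instead carries acc_level forward (4, then 7), so the second namespace
 * starts where the first ended and the prio spans 6 levels.
 */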

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
	if (!steering->port_sel_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
			      PORT_SEL_NUM_LEVELS);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
 * When creating a new ns for each chain store it in the first available slot.
 * Assume tc chains are created and stored first and only then the FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

static int create_fdb_bypass(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *prio;
	int i;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
	if (IS_ERR(prio))
		return PTR_ERR(prio);

	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
		prio = fs_create_prio(ns, i, 1);
		if (IS_ERR(prio))
			return PTR_ERR(prio);
	}
	return 0;
}
3071 | |
3072 | static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering) |
3073 | { |
3074 | cleanup_root_ns(root_ns: steering->fdb_root_ns); |
3075 | steering->fdb_root_ns = NULL; |
3076 | kfree(objp: steering->fdb_sub_ns); |
3077 | steering->fdb_sub_ns = NULL; |
3078 | } |
3079 | |
3080 | static int init_fdb_root_ns(struct mlx5_flow_steering *steering) |
3081 | { |
3082 | struct fs_prio *maj_prio; |
3083 | int err; |
3084 | |
3085 | steering->fdb_root_ns = create_root_ns(steering, table_type: FS_FT_FDB); |
3086 | if (!steering->fdb_root_ns) |
3087 | return -ENOMEM; |
3088 | |
3089 | err = create_fdb_bypass(steering); |
3090 | if (err) |
3091 | goto out_err; |
3092 | |
3093 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_CRYPTO_INGRESS, num_levels: 3); |
3094 | if (IS_ERR(ptr: maj_prio)) { |
3095 | err = PTR_ERR(ptr: maj_prio); |
3096 | goto out_err; |
3097 | } |
3098 | |
3099 | err = create_fdb_fast_path(steering); |
3100 | if (err) |
3101 | goto out_err; |
3102 | |
3103 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_TC_MISS, num_levels: 1); |
3104 | if (IS_ERR(ptr: maj_prio)) { |
3105 | err = PTR_ERR(ptr: maj_prio); |
3106 | goto out_err; |
3107 | } |
3108 | |
3109 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_BR_OFFLOAD, num_levels: 4); |
3110 | if (IS_ERR(ptr: maj_prio)) { |
3111 | err = PTR_ERR(ptr: maj_prio); |
3112 | goto out_err; |
3113 | } |
3114 | |
3115 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_SLOW_PATH, num_levels: 1); |
3116 | if (IS_ERR(ptr: maj_prio)) { |
3117 | err = PTR_ERR(ptr: maj_prio); |
3118 | goto out_err; |
3119 | } |
3120 | |
3121 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_CRYPTO_EGRESS, num_levels: 3); |
3122 | if (IS_ERR(ptr: maj_prio)) { |
3123 | err = PTR_ERR(ptr: maj_prio); |
3124 | goto out_err; |
3125 | } |
3126 | |
3127 | /* We put this priority last, knowing that nothing will get here |
3128 | * unless explicitly forwarded to. This is possible because the |
3129 | * slow path tables have catch all rules and nothing gets passed |
3130 | * those tables. |
3131 | */ |
3132 | maj_prio = fs_create_prio(ns: &steering->fdb_root_ns->ns, prio: FDB_PER_VPORT, num_levels: 1); |
3133 | if (IS_ERR(ptr: maj_prio)) { |
3134 | err = PTR_ERR(ptr: maj_prio); |
3135 | goto out_err; |
3136 | } |
3137 | |
3138 | set_prio_attrs(steering->fdb_root_ns); |
3139 | return 0; |
3140 | |
3141 | out_err: |
3142 | cleanup_fdb_root_ns(steering); |
3143 | return err; |
3144 | } |
3145 | |
3146 | static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) |
3147 | { |
3148 | struct fs_prio *prio; |
3149 | |
3150 | steering->esw_egress_root_ns[vport] = create_root_ns(steering, table_type: FS_FT_ESW_EGRESS_ACL); |
3151 | if (!steering->esw_egress_root_ns[vport]) |
3152 | return -ENOMEM; |
3153 | |
3154 | /* create 1 prio*/ |
3155 | prio = fs_create_prio(ns: &steering->esw_egress_root_ns[vport]->ns, prio: 0, num_levels: 1); |
3156 | return PTR_ERR_OR_ZERO(ptr: prio); |
3157 | } |
3158 | |
3159 | static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) |
3160 | { |
3161 | struct fs_prio *prio; |
3162 | |
3163 | steering->esw_ingress_root_ns[vport] = create_root_ns(steering, table_type: FS_FT_ESW_INGRESS_ACL); |
3164 | if (!steering->esw_ingress_root_ns[vport]) |
3165 | return -ENOMEM; |
3166 | |
3167 | /* create 1 prio*/ |
3168 | prio = fs_create_prio(ns: &steering->esw_ingress_root_ns[vport]->ns, prio: 0, num_levels: 1); |
3169 | return PTR_ERR_OR_ZERO(ptr: prio); |
3170 | } |
3171 | |
3172 | int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports) |
3173 | { |
3174 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3175 | int err; |
3176 | int i; |
3177 | |
3178 | steering->esw_egress_root_ns = |
3179 | kcalloc(n: total_vports, |
3180 | size: sizeof(*steering->esw_egress_root_ns), |
3181 | GFP_KERNEL); |
3182 | if (!steering->esw_egress_root_ns) |
3183 | return -ENOMEM; |
3184 | |
3185 | for (i = 0; i < total_vports; i++) { |
3186 | err = init_egress_acl_root_ns(steering, vport: i); |
3187 | if (err) |
3188 | goto cleanup_root_ns; |
3189 | } |
3190 | steering->esw_egress_acl_vports = total_vports; |
3191 | return 0; |
3192 | |
3193 | cleanup_root_ns: |
3194 | for (i--; i >= 0; i--) |
3195 | cleanup_root_ns(root_ns: steering->esw_egress_root_ns[i]); |
3196 | kfree(objp: steering->esw_egress_root_ns); |
3197 | steering->esw_egress_root_ns = NULL; |
3198 | return err; |
3199 | } |
3200 | |
3201 | void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev) |
3202 | { |
3203 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3204 | int i; |
3205 | |
3206 | if (!steering->esw_egress_root_ns) |
3207 | return; |
3208 | |
3209 | for (i = 0; i < steering->esw_egress_acl_vports; i++) |
3210 | cleanup_root_ns(root_ns: steering->esw_egress_root_ns[i]); |
3211 | |
3212 | kfree(objp: steering->esw_egress_root_ns); |
3213 | steering->esw_egress_root_ns = NULL; |
3214 | } |
3215 | |
3216 | int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports) |
3217 | { |
3218 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3219 | int err; |
3220 | int i; |
3221 | |
3222 | steering->esw_ingress_root_ns = |
3223 | kcalloc(n: total_vports, |
3224 | size: sizeof(*steering->esw_ingress_root_ns), |
3225 | GFP_KERNEL); |
3226 | if (!steering->esw_ingress_root_ns) |
3227 | return -ENOMEM; |
3228 | |
3229 | for (i = 0; i < total_vports; i++) { |
3230 | err = init_ingress_acl_root_ns(steering, vport: i); |
3231 | if (err) |
3232 | goto cleanup_root_ns; |
3233 | } |
3234 | steering->esw_ingress_acl_vports = total_vports; |
3235 | return 0; |
3236 | |
3237 | cleanup_root_ns: |
3238 | for (i--; i >= 0; i--) |
3239 | cleanup_root_ns(root_ns: steering->esw_ingress_root_ns[i]); |
3240 | kfree(objp: steering->esw_ingress_root_ns); |
3241 | steering->esw_ingress_root_ns = NULL; |
3242 | return err; |
3243 | } |
3244 | |
3245 | void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev) |
3246 | { |
3247 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3248 | int i; |
3249 | |
3250 | if (!steering->esw_ingress_root_ns) |
3251 | return; |
3252 | |
3253 | for (i = 0; i < steering->esw_ingress_acl_vports; i++) |
3254 | cleanup_root_ns(root_ns: steering->esw_ingress_root_ns[i]); |
3255 | |
3256 | kfree(objp: steering->esw_ingress_root_ns); |
3257 | steering->esw_ingress_root_ns = NULL; |
3258 | } |
3259 | |
3260 | u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) |
3261 | { |
3262 | struct mlx5_flow_root_namespace *root; |
3263 | struct mlx5_flow_namespace *ns; |
3264 | |
3265 | ns = mlx5_get_flow_namespace(dev, type); |
3266 | if (!ns) |
3267 | return 0; |
3268 | |
	root = find_root(&ns->node);
3270 | if (!root) |
3271 | return 0; |
3272 | |
3273 | return root->cmds->get_capabilities(root, root->table_type); |
3274 | } |
3275 | |
3276 | static int init_egress_root_ns(struct mlx5_flow_steering *steering) |
3277 | { |
3278 | int err; |
3279 | |
	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
3282 | if (!steering->egress_root_ns) |
3283 | return -ENOMEM; |
3284 | |
	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
3287 | if (err) |
3288 | goto cleanup; |
3289 | set_prio_attrs(steering->egress_root_ns); |
3290 | return 0; |
3291 | cleanup: |
	cleanup_root_ns(steering->egress_root_ns);
3293 | steering->egress_root_ns = NULL; |
3294 | return err; |
3295 | } |
3296 | |
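/* devlink "flow_steering_mode" validation: "dmfs" is always accepted, while
 * "smfs" additionally requires software-managed steering support and is
 * rejected while the eswitch is in offloads mode.
 */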
3297 | static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id, |
3298 | union devlink_param_value val, |
3299 | struct netlink_ext_ack *extack) |
3300 | { |
3301 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
3302 | char *value = val.vstr; |
3303 | int err = 0; |
3304 | |
3305 | if (!strcmp(value, "dmfs" )) { |
3306 | return 0; |
3307 | } else if (!strcmp(value, "smfs" )) { |
3308 | u8 eswitch_mode; |
3309 | bool smfs_cap; |
3310 | |
3311 | eswitch_mode = mlx5_eswitch_mode(dev); |
3312 | smfs_cap = mlx5_fs_dr_is_supported(dev); |
3313 | |
3314 | if (!smfs_cap) { |
3315 | err = -EOPNOTSUPP; |
3316 | NL_SET_ERR_MSG_MOD(extack, |
3317 | "Software managed steering is not supported by current device" ); |
3318 | } |
3319 | |
3320 | else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) { |
3321 | NL_SET_ERR_MSG_MOD(extack, |
3322 | "Software managed steering is not supported when eswitch offloads enabled." ); |
3323 | err = -EOPNOTSUPP; |
3324 | } |
3325 | } else { |
3326 | NL_SET_ERR_MSG_MOD(extack, |
3327 | "Bad parameter: supported values are [\"dmfs\", \"smfs\"]" ); |
3328 | err = -EINVAL; |
3329 | } |
3330 | |
3331 | return err; |
3332 | } |
3333 | |
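/* devlink "flow_steering_mode" setter; the input has already been screened
 * by mlx5_fs_mode_validate(), so anything that is not "smfs" maps to DMFS.
 */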
3334 | static int mlx5_fs_mode_set(struct devlink *devlink, u32 id, |
3335 | struct devlink_param_gset_ctx *ctx) |
3336 | { |
3337 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
3338 | enum mlx5_flow_steering_mode mode; |
3339 | |
3340 | if (!strcmp(ctx->val.vstr, "smfs" )) |
3341 | mode = MLX5_FLOW_STEERING_MODE_SMFS; |
3342 | else |
3343 | mode = MLX5_FLOW_STEERING_MODE_DMFS; |
3344 | dev->priv.steering->mode = mode; |
3345 | |
3346 | return 0; |
3347 | } |
3348 | |
3349 | static int mlx5_fs_mode_get(struct devlink *devlink, u32 id, |
3350 | struct devlink_param_gset_ctx *ctx) |
3351 | { |
3352 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
3353 | |
3354 | if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) |
3355 | strcpy(p: ctx->val.vstr, q: "smfs" ); |
3356 | else |
3357 | strcpy(p: ctx->val.vstr, q: "dmfs" ); |
3358 | return 0; |
3359 | } |
3360 | |
3361 | static const struct devlink_param mlx5_fs_params[] = { |
3362 | DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, |
3363 | "flow_steering_mode" , DEVLINK_PARAM_TYPE_STRING, |
3364 | BIT(DEVLINK_PARAM_CMODE_RUNTIME), |
3365 | mlx5_fs_mode_get, mlx5_fs_mode_set, |
3366 | mlx5_fs_mode_validate), |
3367 | }; |
3368 | |
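/* Tear down every root namespace and drop the devlink parameters. Safe to
 * call on a partially initialized steering instance, which is how the
 * error path of mlx5_fs_core_init() uses it.
 */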
3369 | void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev) |
3370 | { |
3371 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3372 | |
	cleanup_root_ns(steering->root_ns);
	cleanup_fdb_root_ns(steering);
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);

	devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
			       ARRAY_SIZE(mlx5_fs_params));
3384 | } |
3385 | |
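/* Build the flow steering tree: each root namespace is created only when
 * firmware advertises support for the corresponding flow table type, so a
 * missing capability silently skips that namespace rather than failing.
 * Any real error unwinds everything through mlx5_fs_core_cleanup().
 */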
3386 | int mlx5_fs_core_init(struct mlx5_core_dev *dev) |
3387 | { |
3388 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3389 | int err; |
3390 | |
	err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
3392 | ARRAY_SIZE(mlx5_fs_params)); |
3393 | if (err) |
3394 | return err; |
3395 | |
3396 | if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && |
3397 | (MLX5_CAP_GEN(dev, nic_flow_table))) || |
3398 | ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && |
3399 | MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) && |
3400 | MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { |
3401 | err = init_root_ns(steering); |
3402 | if (err) |
3403 | goto err; |
3404 | } |
3405 | |
3406 | if (MLX5_ESWITCH_MANAGER(dev)) { |
3407 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
3408 | err = init_fdb_root_ns(steering); |
3409 | if (err) |
3410 | goto err; |
3411 | } |
3412 | } |
3413 | |
3414 | if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) { |
3415 | err = init_sniffer_rx_root_ns(steering); |
3416 | if (err) |
3417 | goto err; |
3418 | } |
3419 | |
3420 | if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) { |
3421 | err = init_sniffer_tx_root_ns(steering); |
3422 | if (err) |
3423 | goto err; |
3424 | } |
3425 | |
3426 | if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) { |
3427 | err = init_port_sel_root_ns(steering); |
3428 | if (err) |
3429 | goto err; |
3430 | } |
3431 | |
3432 | if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && |
3433 | MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { |
3434 | err = init_rdma_rx_root_ns(steering); |
3435 | if (err) |
3436 | goto err; |
3437 | } |
3438 | |
3439 | if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) { |
3440 | err = init_rdma_tx_root_ns(steering); |
3441 | if (err) |
3442 | goto err; |
3443 | } |
3444 | |
3445 | if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { |
3446 | err = init_egress_root_ns(steering); |
3447 | if (err) |
3448 | goto err; |
3449 | } |
3450 | |
3451 | return 0; |
3452 | |
3453 | err: |
3454 | mlx5_fs_core_cleanup(dev); |
3455 | return err; |
3456 | } |
3457 | |
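/* Release everything allocated by mlx5_fs_core_alloc(). kmem_cache_destroy()
 * and kfree() both accept NULL, so this also serves as the error path for a
 * partially completed allocation.
 */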
3458 | void mlx5_fs_core_free(struct mlx5_core_dev *dev) |
3459 | { |
3460 | struct mlx5_flow_steering *steering = dev->priv.steering; |
3461 | |
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
3465 | mlx5_ft_pool_destroy(dev); |
3466 | mlx5_cleanup_fc_stats(dev); |
3467 | } |
3468 | |
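/* Allocate the per-device steering context: flow counter statistics, the
 * flow table pool, the steering struct itself and the flow group/FTE slab
 * caches. The default mode is SMFS whenever software steering is supported.
 */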
3469 | int mlx5_fs_core_alloc(struct mlx5_core_dev *dev) |
3470 | { |
3471 | struct mlx5_flow_steering *steering; |
3472 | int err = 0; |
3473 | |
3474 | err = mlx5_init_fc_stats(dev); |
3475 | if (err) |
3476 | return err; |
3477 | |
3478 | err = mlx5_ft_pool_init(dev); |
3479 | if (err) |
3480 | goto err; |
3481 | |
	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3483 | if (!steering) { |
3484 | err = -ENOMEM; |
3485 | goto err; |
3486 | } |
3487 | |
3488 | steering->dev = dev; |
3489 | dev->priv.steering = steering; |
3490 | |
3491 | if (mlx5_fs_dr_is_supported(dev)) |
3492 | steering->mode = MLX5_FLOW_STEERING_MODE_SMFS; |
3493 | else |
3494 | steering->mode = MLX5_FLOW_STEERING_MODE_DMFS; |
3495 | |
3496 | steering->fgs_cache = kmem_cache_create(name: "mlx5_fs_fgs" , |
3497 | size: sizeof(struct mlx5_flow_group), align: 0, |
3498 | flags: 0, NULL); |
3499 | steering->ftes_cache = kmem_cache_create(name: "mlx5_fs_ftes" , size: sizeof(struct fs_fte), align: 0, |
3500 | flags: 0, NULL); |
3501 | if (!steering->ftes_cache || !steering->fgs_cache) { |
3502 | err = -ENOMEM; |
3503 | goto err; |
3504 | } |
3505 | |
3506 | return 0; |
3507 | |
3508 | err: |
3509 | mlx5_fs_core_free(dev); |
3510 | return err; |
3511 | } |
3512 | |
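/* Attach an underlay QPN to the NIC RX root flow table and track it on the
 * root namespace's underlay_qpns list, all under chain_lock so the root FT
 * cannot change underneath the update.
 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(mdev, qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(mdev, qpn);
 *
 * In practice the IPoIB enhanced-offloads path is the expected user of
 * this pair.
 */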
3513 | int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) |
3514 | { |
3515 | struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; |
3516 | struct mlx5_ft_underlay_qp *new_uqp; |
3517 | int err = 0; |
3518 | |
	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3520 | if (!new_uqp) |
3521 | return -ENOMEM; |
3522 | |
3523 | mutex_lock(&root->chain_lock); |
3524 | |
3525 | if (!root->root_ft) { |
3526 | err = -EINVAL; |
3527 | goto update_ft_fail; |
3528 | } |
3529 | |
3530 | err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn, |
3531 | false); |
3532 | if (err) { |
3533 | mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n" , |
3534 | underlay_qpn, err); |
3535 | goto update_ft_fail; |
3536 | } |
3537 | |
3538 | new_uqp->qpn = underlay_qpn; |
	list_add_tail(&new_uqp->list, &root->underlay_qpns);
3540 | |
	mutex_unlock(&root->chain_lock);
3542 | |
3543 | return 0; |
3544 | |
3545 | update_ft_fail: |
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
3548 | return err; |
3549 | } |
3550 | EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn); |
3551 | |
3552 | int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) |
3553 | { |
3554 | struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; |
3555 | struct mlx5_ft_underlay_qp *uqp; |
3556 | bool found = false; |
3557 | int err = 0; |
3558 | |
3559 | mutex_lock(&root->chain_lock); |
3560 | list_for_each_entry(uqp, &root->underlay_qpns, list) { |
3561 | if (uqp->qpn == underlay_qpn) { |
3562 | found = true; |
3563 | break; |
3564 | } |
3565 | } |
3566 | |
3567 | if (!found) { |
3568 | mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n" , |
3569 | underlay_qpn); |
3570 | err = -EINVAL; |
3571 | goto out; |
3572 | } |
3573 | |
3574 | err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn, |
3575 | true); |
3576 | if (err) |
3577 | mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n" , |
3578 | underlay_qpn, err); |
3579 | |
	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);
3583 | |
3584 | return 0; |
3585 | |
3586 | out: |
	mutex_unlock(&root->chain_lock);
3588 | return err; |
3589 | } |
3590 | EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); |
3591 | |
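/* Resolve @ns_type to a root namespace. Vport ACL types are looked up via
 * the per-vport ACL helper using vport 0 as a representative; all other
 * types go through the regular namespace lookup.
 */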
3592 | static struct mlx5_flow_root_namespace |
3593 | *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type) |
3594 | { |
3595 | struct mlx5_flow_namespace *ns; |
3596 | |
3597 | if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS || |
3598 | ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS) |
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3600 | else |
3601 | ns = mlx5_get_flow_namespace(dev, ns_type); |
3602 | if (!ns) |
3603 | return NULL; |
3604 | |
	return find_root(&ns->node);
3606 | } |
3607 | |
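/* Allocate a modify-header context in the root namespace that backs
 * @ns_type; the context is later referenced from flow actions. On any
 * failure an ERR_PTR is returned and nothing is left allocated.
 */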
struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
3611 | { |
3612 | struct mlx5_flow_root_namespace *root; |
3613 | struct mlx5_modify_hdr *modify_hdr; |
3614 | int err; |
3615 | |
3616 | root = get_root_namespace(dev, ns_type); |
3617 | if (!root) |
		return ERR_PTR(-EOPNOTSUPP);
3619 | |
	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);
3623 | |
3624 | modify_hdr->ns_type = ns_type; |
3625 | err = root->cmds->modify_header_alloc(root, ns_type, num_actions, |
3626 | modify_actions, modify_hdr); |
3627 | if (err) { |
		kfree(modify_hdr);
		return ERR_PTR(err);
3630 | } |
3631 | |
3632 | return modify_hdr; |
3633 | } |
3634 | EXPORT_SYMBOL(mlx5_modify_header_alloc); |
3635 | |
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
3638 | { |
3639 | struct mlx5_flow_root_namespace *root; |
3640 | |
	root = get_root_namespace(dev, modify_hdr->ns_type);
3642 | if (WARN_ON(!root)) |
3643 | return; |
3644 | root->cmds->modify_header_dealloc(root, modify_hdr); |
	kfree(modify_hdr);
3646 | } |
3647 | EXPORT_SYMBOL(mlx5_modify_header_dealloc); |
3648 | |
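/* Allocate a packet reformat (encap/decap) context described by @params in
 * the root namespace backing @ns_type. Mirrors mlx5_modify_header_alloc()
 * in structure and error handling.
 */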
3649 | struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, |
3650 | struct mlx5_pkt_reformat_params *params, |
3651 | enum mlx5_flow_namespace_type ns_type) |
3652 | { |
3653 | struct mlx5_pkt_reformat *pkt_reformat; |
3654 | struct mlx5_flow_root_namespace *root; |
3655 | int err; |
3656 | |
3657 | root = get_root_namespace(dev, ns_type); |
3658 | if (!root) |
		return ERR_PTR(-EOPNOTSUPP);
3660 | |
	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);
3664 | |
3665 | pkt_reformat->ns_type = ns_type; |
3666 | pkt_reformat->reformat_type = params->type; |
3667 | err = root->cmds->packet_reformat_alloc(root, params, ns_type, |
3668 | pkt_reformat); |
3669 | if (err) { |
		kfree(pkt_reformat);
		return ERR_PTR(err);
3672 | } |
3673 | |
3674 | return pkt_reformat; |
3675 | } |
3676 | EXPORT_SYMBOL(mlx5_packet_reformat_alloc); |
3677 | |
3678 | void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, |
3679 | struct mlx5_pkt_reformat *pkt_reformat) |
3680 | { |
3681 | struct mlx5_flow_root_namespace *root; |
3682 | |
	root = get_root_namespace(dev, pkt_reformat->ns_type);
3684 | if (WARN_ON(!root)) |
3685 | return; |
3686 | root->cmds->packet_reformat_dealloc(root, pkt_reformat); |
	kfree(pkt_reformat);
3688 | } |
3689 | EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); |
3690 | |
3691 | int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer) |
3692 | { |
3693 | return definer->id; |
3694 | } |
3695 | |
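/* Create a match definer with the given @format_id and @match_mask in the
 * root namespace backing @ns_type. The returned object's hardware id can
 * be queried with mlx5_get_match_definer_id().
 */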
3696 | struct mlx5_flow_definer * |
3697 | mlx5_create_match_definer(struct mlx5_core_dev *dev, |
3698 | enum mlx5_flow_namespace_type ns_type, u16 format_id, |
3699 | u32 *match_mask) |
3700 | { |
3701 | struct mlx5_flow_root_namespace *root; |
3702 | struct mlx5_flow_definer *definer; |
3703 | int id; |
3704 | |
3705 | root = get_root_namespace(dev, ns_type); |
3706 | if (!root) |
		return ERR_PTR(-EOPNOTSUPP);
3708 | |
	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return ERR_PTR(-ENOMEM);
3712 | |
3713 | definer->ns_type = ns_type; |
3714 | id = root->cmds->create_match_definer(root, format_id, match_mask); |
3715 | if (id < 0) { |
3716 | mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n" , id); |
3717 | kfree(objp: definer); |
3718 | return ERR_PTR(error: id); |
3719 | } |
3720 | definer->id = id; |
3721 | return definer; |
3722 | } |
3723 | |
3724 | void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, |
3725 | struct mlx5_flow_definer *definer) |
3726 | { |
3727 | struct mlx5_flow_root_namespace *root; |
3728 | |
	root = get_root_namespace(dev, definer->ns_type);
3730 | if (WARN_ON(!root)) |
3731 | return; |
3732 | |
3733 | root->cmds->destroy_match_definer(root, definer->id); |
	kfree(definer);
3735 | } |
3736 | |
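/* Pair @ns with @peer_ns for cross-namespace steering. Both namespaces must
 * run in the same steering mode; a NULL @peer_ns is accepted and passed
 * through to the command layer (used to unpair).
 */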
3737 | int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, |
3738 | struct mlx5_flow_root_namespace *peer_ns, |
3739 | u16 peer_vhca_id) |
3740 | { |
3741 | if (peer_ns && ns->mode != peer_ns->mode) { |
3742 | mlx5_core_err(ns->dev, |
3743 | "Can't peer namespace of different steering mode\n" ); |
3744 | return -EINVAL; |
3745 | } |
3746 | |
3747 | return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id); |
3748 | } |
3749 | |
3750 | /* This function should be called only at init stage of the namespace. |
3751 | * It is not safe to call this function while steering operations |
3752 | * are executed in the namespace. |
3753 | */ |
3754 | int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, |
3755 | enum mlx5_flow_steering_mode mode) |
3756 | { |
3757 | struct mlx5_flow_root_namespace *root; |
3758 | const struct mlx5_flow_cmds *cmds; |
3759 | int err; |
3760 | |
	root = find_root(&ns->node);
3762 | if (&root->ns != ns) |
3763 | /* Can't set cmds to non root namespace */ |
3764 | return -EINVAL; |
3765 | |
3766 | if (root->table_type != FS_FT_FDB) |
3767 | return -EOPNOTSUPP; |
3768 | |
3769 | if (root->mode == mode) |
3770 | return 0; |
3771 | |
3772 | if (mode == MLX5_FLOW_STEERING_MODE_SMFS) |
3773 | cmds = mlx5_fs_cmd_get_dr_cmds(); |
3774 | else |
3775 | cmds = mlx5_fs_cmd_get_fw_cmds(); |
3776 | if (!cmds) |
3777 | return -EOPNOTSUPP; |
3778 | |
3779 | err = cmds->create_ns(root); |
3780 | if (err) { |
3781 | mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n" , |
3782 | err); |
3783 | return err; |
3784 | } |
3785 | |
3786 | root->cmds->destroy_ns(root); |
3787 | root->cmds = cmds; |
3788 | root->mode = mode; |
3789 | |
3790 | return 0; |
3791 | } |
3792 | |