/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "diag/reporter_vnic.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"
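
/* Representors carry only slow-path (eswitch miss) traffic, so they default
 * to a single channel and a small send queue (2^7 = 128 entries, or the
 * driver minimum if that is larger); the uplink representor overrides the
 * SQ size in mlx5e_build_rep_params().
 */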
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int count;

	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
	if (count >= sizeof(drvinfo->fw_version))
		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
			 "%d.%d.%04d", fw_rev_maj(mdev),
			 fw_rev_min(mdev), fw_rev_sub(mdev));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_unicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     rx_vport_rdma_multicast_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     tx_vport_rdma_multicast_bytes) },
};

static const struct counter_desc vport_rep_loopback_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     vport_loopback_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
			     vport_loopback_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS +
	       NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vport_rep_loopback_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
						   vport_rep_stats_desc, i);
	for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
						   vport_rep_loopback_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return;

	err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		goto out;
	}

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)
	/* flip tx/rx as we are reporting the counters for the switch vport */
	rep_stats->vport_rx_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	rep_stats->vport_tx_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	rep_stats->vport_rx_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	rep_stats->vport_tx_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	rep_stats->rx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
	rep_stats->tx_vport_rdma_unicast_packets =
		MLX5_GET_CTR(out, received_ib_unicast.packets);
	rep_stats->rx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
	rep_stats->tx_vport_rdma_unicast_bytes =
		MLX5_GET_CTR(out, received_ib_unicast.octets);
	rep_stats->rx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
	rep_stats->tx_vport_rdma_multicast_packets =
		MLX5_GET_CTR(out, received_ib_multicast.packets);
	rep_stats->rx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
	rep_stats->tx_vport_rdma_multicast_bytes =
		MLX5_GET_CTR(out, received_ib_multicast.octets);

	if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) {
		rep_stats->vport_loopback_packets =
			MLX5_GET_CTR(out, local_loopback.packets);
		rep_stats->vport_loopback_bytes =
			MLX5_GET_CTR(out, local_loopback.octets);
	}

out:
	kvfree(out);
}

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, u8 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
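
/* SQ-to-vport ("send to vport") rules let packets transmitted on the
 * representor's own send queues be re-injected by the eswitch toward the
 * represented vport. Teardown removes both the local rule and any rules
 * mirrored to peer eswitches via the sq_peer xarray.
 */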
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
	unsigned long i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		xa_for_each(&rep_sq->sq_peer, i, sq_peer) {
			if (sq_peer->rule)
				mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);

			xa_erase(&rep_sq->sq_peer, i);
			kfree(sq_peer);
		}

		xa_destroy(&rep_sq->sq_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
					   struct mlx5e_rep_sq *rep_sq, int i)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_devcom_comp_dev *tmp;
	struct mlx5_eswitch *peer_esw;

	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
		struct mlx5e_rep_sq_peer *sq_peer;
		int err;

		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
		if (!sq_peer)
			return -ENOMEM;

		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
								rep, rep_sq->sqn);
		if (IS_ERR(flow_rule)) {
			kfree(sq_peer);
			return PTR_ERR(flow_rule);
		}

		sq_peer->rule = flow_rule;
		sq_peer->peer = peer_esw;
		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
		if (err) {
			kfree(sq_peer);
			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
			return err;
		}
	}

	return 0;
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	bool devcom_locked = false;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	if (mlx5_devcom_comp_is_ready(esw->devcom) &&
	    mlx5_devcom_for_each_peer_begin(esw->devcom))
		devcom_locked = true;

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor SQs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		xa_init(&rep_sq->sq_peer);
		if (devcom_locked) {
			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
			if (err) {
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				xa_destroy(&rep_sq->sq_peer);
				kfree(rep_sq);
				goto out_err;
			}
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(esw->devcom);

	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(esw->devcom);

	return err;
}
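
/* Gather the SQNs of every SQ the representor may transmit on (one TXQ SQ
 * per TC per channel, the XDP SQs on the uplink rep, and the PTP SQs when
 * TX port timestamping is enabled) and install a forwarding rule for each
 * via mlx5e_sqs2vport_start().
 */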
static int
mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int n, tc, nch, num_sqs = 0;
	struct mlx5e_channel *c;
	int err = -ENOMEM;
	bool ptp_sq;
	u32 *sqs;

	ptp_sq = !!(priv->channels.ptp &&
		    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
	nch = priv->channels.num + ptp_sq;
	/* +2 for the XDP SQs; they don't exist on the PTP channel, so the
	 * array is slightly over-allocated for it, which is harmless since
	 * num_sqs tracks the entries actually filled in.
	 */
	if (is_uplink_rep)
		sqs_per_channel += 2;

	sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;

		if (is_uplink_rep) {
			if (c->xdp)
				sqs[num_sqs++] = c->rq_xdpsq.sqn;

			sqs[num_sqs++] = c->xdpsq.sqn;
		}
	}
	if (ptp_sq) {
		struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

		for (tc = 0; tc < ptp_ch->num_tc; tc++)
			sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kvfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

static void
mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

static int
mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_group *g;

	g = esw->fdb_table.offloads.send_to_vport_meta_grp;
	if (!g)
		return 0;

	flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	rpriv->send_to_vport_meta_rule = flow_rule;

	return 0;
}

static void
mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (rpriv->send_to_vport_meta_rule)
		mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
}

void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
{
	mlx5e_add_sqs_fwd_rules(priv);
	mlx5e_rep_add_meta_tunnel_rule(priv);
}

void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
{
	mlx5e_rep_del_meta_tunnel_rule(priv);
	mlx5e_remove_sqs_fwd_rules(priv);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	if (new_carrier) {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
		if (err)
			return err;
		netif_carrier_on(dev);
	} else {
		err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
						    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
		if (err)
			return err;
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open = mlx5e_rep_open,
	.ndo_stop = mlx5e_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_stats64 = mlx5e_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_rep_change_mtu,
	.ndo_change_carrier = mlx5e_rep_change_carrier,
};

bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}

bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

/* One indirect TIR set for outer. Inner not supported in reps. */
#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS
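
/* Cap the channels a representor may use by the device's TIR budget; each
 * channel consumes one direct TIR, on top of the PF's own TIRs and one set
 * of indirect TIRs per vport:
 *
 *   max_nch = (max_tirs - pf_tirs - num_vports * REP_NUM_INDIR_TIRS)
 *             / num_vports
 *
 * For illustration only (assumed numbers): with 4096 TIRs, 128 PF TIRs,
 * 256 vports and 9 indirect TIRs per vport, each rep may use
 * (4096 - 128 - 256 * 9) / 256 = 6 channels.
 */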
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
	int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
	int num_vports = mlx5_eswitch_get_total_vports(mdev);

	return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
		- (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* If the netdev is already registered (e.g. on a move from the nic
	 * profile to uplink), the RTNL lock must be held before triggering
	 * netdev notifiers.
	 */
	if (take_rtnl)
		rtnl_lock();
	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);
	if (take_rtnl)
		rtnl_unlock();

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc = 1;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features |= NETIF_F_HW_TC;
#endif
	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs =
		mlx5e_fs_init(priv->profile, mdev,
			      !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
			      priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}

static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->dfs_root = debugfs_create_dir("nic",
					    mlx5_debugfs_get_dev_root(mdev));

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
				 priv->dfs_root);
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		debugfs_remove_recursive(priv->dfs_root);
		return -ENOMEM;
	}

	mlx5e_vxlan_set_netdev_info(priv);
	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);
	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
	debugfs_remove_recursive(priv->dfs_root);
	priv->fs = NULL;
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	mlx5e_fs_set_ns(priv->fs,
			mlx5_get_flow_namespace(priv->mdev,
						MLX5_FLOW_NAMESPACE_KERNEL), false);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* Give the uplink rep TTC a lower level for chaining from the root FT */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
	if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
		err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non-uplink reps skip any bypass tables and go directly to
		 * their own TTC
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
		return 0;
	}

	/* the uplink root FT is used to auto-chain to the ethtool or TTC tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		rpriv->root_ft = NULL;
	}

	return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}

int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_free_fs;
	}

	priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
					   &priv->channels.params.packet_merge,
					   priv->channels.params.num_channels);
	if (IS_ERR(priv->rx_res)) {
		err = PTR_ERR(priv->rx_res);
		mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err);
		goto err_close_drop_rq;
	}

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_rx_res;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv->fs);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
	priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_free_fs:
	mlx5e_fs_cleanup(priv->fs);
	priv->fs = NULL;
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv->fs);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
	mlx5e_rx_res_destroy(priv->rx_res);
	priv->rx_res = ERR_PTR(-EINVAL);
	mlx5e_close_drop_rq(&priv->drop_rq);
}
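
/* Multiport eswitch transitions invalidate the uplink vport RX rule, which
 * is presumably tied to the current port configuration; the work item below
 * re-creates it outside the notifier context (see mlx5e_rep_event_mpesw()).
 */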
static void mlx5e_rep_mpesw_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *uplink_priv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     mpesw_work);
	struct mlx5e_rep_priv *rpriv =
		container_of(uplink_priv, struct mlx5e_rep_priv,
			     uplink_priv);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	rep_vport_rx_rule_destroy(priv);
	mlx5e_create_rep_vport_rx_rule(priv);
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		goto out;

	mlx5e_tc_int_port_init_rep_rx(priv);

	INIT_WORK(&rpriv->uplink_priv.mpesw_work, mlx5e_rep_mpesw_work);

out:
	return err;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.mpesw_work);
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
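
/* Beyond the per-rep TX state, the uplink representor owns the shared TC
 * offload infrastructure: the rep TC state, tunnel encap entropy, uplink
 * bonding support and the netdev notifier that tracks related devices.
 */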
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_rep_neigh_init(rpriv);
	if (err)
		goto err_neigh_init;

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto err_init_tx;
	}

	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
	if (err)
		goto err_ht_init;

	return 0;

err_ht_init:
	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
	mlx5e_rep_neigh_cleanup(rpriv);
err_neigh_init:
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);

	mlx5e_rep_neigh_cleanup(rpriv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}

static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return NOTIFY_DONE;

	queue_work(priv->wq, &rpriv->uplink_priv.mpesw_work);

	return NOTIFY_OK;
}
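
/* Firmware events the uplink representor cares about: port up/down
 * (refresh the carrier state), port affinity changes (handed off to the
 * rep TC code) and multiport eswitch transitions (re-create the vport
 * RX rule).
 */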
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);
	else if (event == MLX5_DEV_EVENT_MULTIPORT_ESW)
		return mlx5e_rep_event_mpesw(priv);

	return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_ipsec_init(priv);

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5e_rep_tc_enable(priv);

	if (MLX5_CAP_GEN(mdev, uplink_follow))
		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
	mlx5_lag_add_netdev(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
	mlx5e_rep_bridge_init(priv);

	netdev->wanted_features |= NETIF_F_HW_TC;

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);

	mlx5e_ipsec_cleanup(priv);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups order is opposite to the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_hw),
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(ptp),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static int
mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter,
				 struct devlink_fmsg *fmsg,
				 struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter);
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, rep->vport,
					     true);
	return 0;
}

static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = {
	.name = "vnic",
	.diagnose = mlx5e_rep_vnic_reporter_diagnose,
};

static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
					   struct devlink_port *dl_port)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct devlink_health_reporter *reporter;

	reporter = devl_port_health_reporter_create(dl_port,
						    &mlx5_rep_vnic_reporter_ops,
						    0, rpriv);
	if (IS_ERR(reporter)) {
		mlx5_core_err(priv->mdev,
			      "Failed to create representor vnic reporter, err = %ld\n",
			      PTR_ERR(reporter));
		return;
	}

	rpriv->rep_vnic_reporter = reporter;
}

static void mlx5e_rep_vnic_reporter_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!IS_ERR_OR_NULL(rpriv->rep_vnic_reporter))
		devl_health_reporter_destroy(rpriv->rep_vnic_reporter);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_rep_enable,
	.disable = mlx5e_rep_disable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_stats_update_ndo_stats,
	.rx_handlers = &mlx5e_rx_handlers_rep,
	.max_tc = 1,
	.stats_grps = mlx5e_rep_stats_grps,
	.stats_grps_num = mlx5e_rep_stats_grps_num,
	.max_nch_limit = mlx5e_rep_max_nch_limit,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init = mlx5e_init_ul_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_ul_rep_rx,
	.cleanup_rx = mlx5e_cleanup_ul_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_uplink_rep_enable,
	.disable = mlx5e_uplink_rep_disable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_stats_update_ndo_stats,
	.update_carrier = mlx5e_update_carrier,
	.rx_handlers = &mlx5e_rx_handlers_rep,
	.max_tc = MLX5_MAX_NUM_TC,
	.stats_grps = mlx5e_ul_rep_stats_grps,
	.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};

/* e-Switch vport representors */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

	rpriv->netdev = priv->netdev;
	return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					   rpriv);
}

static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	mlx5e_netdev_attach_nic_profile(priv);
}

static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	profile = &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
						 rpriv->rep->vport);
	if (!IS_ERR(dl_port)) {
		SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
		mlx5e_rep_vnic_reporter_create(priv, dl_port);
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	return 0;

err_detach_netdev:
	mlx5e_rep_vnic_reporter_destroy(priv);
	mlx5e_detach_netdev(netdev_priv(netdev));
err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	int err;

	rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK)
		err = mlx5e_vport_uplink_rep_load(dev, rep);
	else
		err = mlx5e_vport_vf_rep_load(dev, rep);

	if (err)
		kvfree(rpriv);

	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	unregister_netdev(netdev);
	mlx5e_rep_vnic_reporter_destroy(priv);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kvfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
					 struct mlx5_eswitch *peer_esw)
{
	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	WARN_ON_ONCE(!peer_esw);
	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		struct mlx5e_rep_sq_peer *sq_peer = xa_load(&rep_sq->sq_peer, i);

		if (!sq_peer || sq_peer->peer != peer_esw)
			continue;

		mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
		xa_erase(&rep_sq->sq_peer, i);
		kfree(sq_peer);
	}
}
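
/* Eswitch pairing, e.g. under a multi-PF lag: on PAIR, mirror each rep SQ's
 * send-to-vport rule onto the peer eswitch, indexed in the sq_peer xarray
 * by the peer's vhca_id; on UNPAIR (above), remove exactly the entries
 * belonging to that peer.
 */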
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		sq_peer = xa_load(&rep_sq->sq_peer, i);

		if (sq_peer && sq_peer->peer)
			continue;

		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep,
								rep_sq->sqn);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			goto err_out;
		}

		if (sq_peer) {
			sq_peer->rule = flow_rule;
			sq_peer->peer = peer_esw;
			continue;
		}
		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
		if (!sq_peer) {
			err = -ENOMEM;
			goto err_sq_alloc;
		}
		err = xa_insert(&rep_sq->sq_peer, i, sq_peer, GFP_KERNEL);
		if (err)
			goto err_xa;
		sq_peer->rule = flow_rule;
		sq_peer->peer = peer_esw;
	}

	return 0;
err_xa:
	kfree(sq_peer);
err_sq_alloc:
	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
err_out:
	mlx5e_vport_rep_event_unpair(rep, peer_esw);
	return err;
}

static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 enum mlx5_switchdev_event event,
				 void *data)
{
	int err = 0;

	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
		err = mlx5e_vport_rep_event_pair(esw, rep, data);
	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
		mlx5e_vport_rep_event_unpair(rep, data);

	return err;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};

static int mlx5e_rep_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
	return 0;
}

static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
	struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = vdev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}

static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};

int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}

void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}