// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_del_sp_rules - delete adv rules added on PRs
 * @pf: pointer to the PF struct
 *
 * Delete all advanced rules that were used to forward packets with the
 * device's VSI index to the corresponding eswitch ctrl VSI queue.
 */
static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	xa_for_each(&pf->eswitch.reprs, id, repr) {
		if (repr->sp_rule.rid)
			ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
	}
}

/**
 * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
 * @pf: pointer to PF struct
 * @repr: pointer to the repr struct
 *
 * This function adds an advanced rule that forwards packets with the
 * device's VSI index to the corresponding eswitch ctrl VSI queue.
 */
static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	ice_rule_add_src_vsi_metadata(list);

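	/* Single lookup element: match on the packet's source VSI metadata.
	 * The action below forwards matching packets to the control VSI Rx
	 * queue that is mapped to this port representor.
	 */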
	rule_info.sw_act.flag = ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[repr->q_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
	rule_info.src_vsi = repr->src_vsi->idx;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       &repr->sp_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
			repr->id);

	kfree(list);
	return err;
}

static int
ice_eswitch_add_sp_rules(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;
	int err;

	xa_for_each(&pf->eswitch.reprs, id, repr) {
		err = ice_eswitch_add_sp_rule(pf, repr);
		if (err) {
			ice_eswitch_del_sp_rules(pf);
			return err;
		}
	}

	return 0;
}

/**
 * ice_eswitch_setup_env - configure eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration that is specific to
 * switchdev mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct net_device *netdev = uplink_vsi->netdev;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

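	/* Remove the uplink's own MAC filters and unsync its address lists;
	 * below the uplink is made the default VSI, so it receives all
	 * traffic not claimed by another rule.
	 */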
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(netdev);
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
	netif_addr_unlock_bh(netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_dis_rx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	return 0;

err_override_local_lb:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_dis_rx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
 * @eswitch: pointer to eswitch struct
 *
 * In the eswitch, the number of allocated Tx rings equals the number of Rx
 * rings.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to a port representor netdev. Every
 * port representor gets one dedicated Tx/Rx ring pair, so the number of
 * ring pairs equals the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
{
	struct ice_vsi *vsi = eswitch->control_vsi;
	unsigned long repr_id = 0;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;

		repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
			       XA_PRESENT);
		if (!repr)
			break;

		repr_id += 1;
		repr->q_id = q_id;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

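		/* Rebind the representor's vector to the control VSI; every
		 * vector shares the register index of the control VSI's
		 * first vector.
		 */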
		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}

/**
 * ice_eswitch_release_repr - clear PR VSI configuration
 * @pf: pointer to PF struct
 * @repr: pointer to PR
 */
static void
ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *vsi = repr->src_vsi;

	/* Skip representors that aren't configured */
	if (!repr->dst)
		return;

	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	metadata_dst_free(repr->dst);
	repr->dst = NULL;
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
				       ICE_FWD_TO_VSI);

	netif_napi_del(&repr->q_vector->napi);
}

/**
 * ice_eswitch_setup_repr - configure PR to run in switchdev mode
 * @pf: pointer to PF struct
 * @repr: pointer to PR struct
 */
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_vsi *vsi = repr->src_vsi;
	struct metadata_dst *dst;

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
				       GFP_KERNEL);
	if (!repr->dst)
		goto err_add_mac_fltr;

	if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
		goto err_dst_free;

	if (ice_vsi_add_vlan_zero(vsi))
		goto err_update_security;

	netif_napi_add(repr->netdev, &repr->q_vector->napi,
		       ice_napi_poll);

	netif_keep_dst(repr->netdev);

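	/* The metadata dst carries the source VSI number; the Tx path reads
	 * it back in ice_eswitch_set_target_vsi() to steer packets sent on
	 * the representor to the right VSI.
	 */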
	dst = repr->dst;
	dst->u.port_info.port_id = vsi->vsi_num;
	dst->u.port_info.lower_dev = repr->netdev;
	ice_repr_set_traffic_vsi(repr, ctrl_vsi);

	return 0;

err_update_security:
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
err_dst_free:
	metadata_dst_free(repr->dst);
	repr->dst = NULL;
err_add_mac_fltr:
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure port representor
 * @repr_id: representor ID
 * @vsi: VSI for which port representor is configured
 */
void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	repr = xa_load(&pf->eswitch.reprs, repr_id);
	if (!repr)
		return;

	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	if (repr->br_port)
		repr->br_port->vsi = vsi;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
					       ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
			repr->id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (!vsi || !ice_is_switchdev_running(vsi->back))
		return NETDEV_TX_BUSY;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
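	/* Attach the representor's metadata dst to the skb so that
	 * ice_eswitch_set_target_vsi() can fill the target VSI into the Tx
	 * context descriptor.
	 */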
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->q_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

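	/* Without a metadata dst the packet is switched to the uplink;
	 * otherwise it is directed to the VSI number carried in the dst.
	 */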
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
				     dst->u.port_info.port_id);
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration that is specific to
 * switchdev mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure eswitch control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_SWITCHDEV_CTRL;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @reprs: xarray of reprs
 */
static void ice_eswitch_napi_enable(struct xarray *reprs)
{
	struct ice_repr *repr;
	unsigned long id;

	xa_for_each(reprs, id, repr)
		napi_enable(&repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @reprs: xarray of reprs
 */
static void ice_eswitch_napi_disable(struct xarray *reprs)
{
	struct ice_repr *repr;
	unsigned long id;

	xa_for_each(reprs, id, repr)
		napi_disable(&repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi, *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->eswitch.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->eswitch.control_vsi;
	/* cp VSI is created with 1 queue by default */
	pf->eswitch.qs.value = 1;
	pf->eswitch.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	pf->eswitch.is_running = true;

	return 0;

err_br_offloads:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable eswitch resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;

	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);
	ice_vsi_release(ctrl_vsi);

	pf->eswitch.is_running = false;
	pf->eswitch.qs.is_reaching = false;
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there are no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		xa_destroy(&pf->eswitch.reprs);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		if (ice_is_adq_active(pf)) {
			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			return -EOPNOTSUPP;
		}

		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_repr_start_tx_queues(repr);
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_repr_stop_tx_queues(repr);
}

static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
	ice_eswitch_del_sp_rules(pf);
	ice_eswitch_stop_all_tx_queues(pf);
	ice_eswitch_napi_disable(&pf->eswitch.reprs);
}

static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
	ice_eswitch_napi_enable(&pf->eswitch.reprs);
	ice_eswitch_start_all_tx_queues(pf);
	ice_eswitch_add_sp_rules(pf);
}

static void
ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
{
	struct ice_vsi *cp = eswitch->control_vsi;
	int queues = 0;

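	/* If a target queue count was reserved via
	 * ice_eswitch_reserve_cp_queues(), jump straight to it once enough
	 * representors have attached; otherwise grow or shrink the control
	 * VSI by "change" queues at a time.
	 */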
	if (eswitch->qs.is_reaching) {
		if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
			queues = eswitch->qs.to_reach;
			eswitch->qs.is_reaching = false;
		} else {
			queues = 0;
		}
	} else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
		   change < 0) {
		queues = cp->alloc_txq + change;
	}

	if (queues) {
		cp->req_txq = queues;
		cp->req_rxq = queues;
		ice_vsi_close(cp);
		ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
		ice_vsi_open(cp);
	} else if (!change) {
		/* change == 0 means that VSI wasn't open, open it here */
		ice_vsi_open(cp);
	}

	eswitch->qs.value += change;
	ice_eswitch_remap_rings_to_vectors(eswitch);
}

int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr;
	int change = 1;
	int err;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return 0;

	if (xa_empty(&pf->eswitch.reprs)) {
		err = ice_eswitch_enable_switchdev(pf);
		if (err)
			return err;
		/* Control plane VSI is created with 1 queue by default */
		pf->eswitch.qs.to_reach -= 1;
		change = 0;
	}

	ice_eswitch_stop_reprs(pf);

	repr = ice_repr_add_vf(vf);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_create_repr;
	}

	err = ice_eswitch_setup_repr(pf, repr);
	if (err)
		goto err_setup_repr;

	err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
		       XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	vf->repr_id = repr->id;

	ice_eswitch_cp_change_queues(&pf->eswitch, change);
	ice_eswitch_start_reprs(pf);

	return 0;

err_xa_alloc:
	ice_eswitch_release_repr(pf, repr);
err_setup_repr:
	ice_repr_rem_vf(repr);
err_create_repr:
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	ice_eswitch_start_reprs(pf);

	return err;
}

void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
	struct devlink *devlink = priv_to_devlink(pf);

	if (!repr)
		return;

	ice_eswitch_stop_reprs(pf);
	xa_erase(&pf->eswitch.reprs, repr->id);

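	/* If this was the last representor, tear switchdev down entirely;
	 * otherwise give back the detached representor's queue pair.
	 */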
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	else
		ice_eswitch_cp_change_queues(&pf->eswitch, -1);

	ice_eswitch_release_repr(pf, repr);
	ice_repr_rem_vf(repr);

	if (xa_empty(&pf->eswitch.reprs)) {
		/* since all port representors are destroyed, there is
		 * no point in keeping the nodes
		 */
		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
		devl_lock(devlink);
		devl_rate_nodes_destroy(devlink);
		devl_unlock(devlink);
	} else {
		ice_eswitch_start_reprs(pf);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;
	int err;

	if (!ice_is_switchdev_running(pf))
		return 0;

	err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
	if (err)
		return err;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_eswitch_detach(pf, repr->vf);

	return 0;
}

/**
 * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
 * @pf: pointer to PF structure
 * @change: how many more (or fewer) queues are needed
 *
 * Remember to call ice_eswitch_attach/detach() "change" times afterwards.
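 *
 * A hypothetical caller creating "num_vfs" VFs would, for example, do:
 *
 *	ice_eswitch_reserve_cp_queues(pf, num_vfs);
 *	for (i = 0; i < num_vfs; i++)
 *		ice_eswitch_attach(pf, vf[i]);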
 */
void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
{
	if (pf->eswitch.qs.value + change < 0)
		return;

	pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
	pf->eswitch.qs.is_reaching = true;
}
