// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_sriov.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
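	/* record, for each VSI-relative queue, the absolute PF queue index
	 * that was just reserved in the PF bitmap
	 */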
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
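	/* error path: undo the partial assignment before returning -ENOMEM */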
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the requested state is not reached after multiple
 * retries; otherwise returns 0 on success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

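	/* poll the queue-enable status bit, sleeping 20-40 us between reads,
	 * until it matches the requested state or the retries are exhausted
	 */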
	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set the default values for the ITR settings
 * associated with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	int err;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;
	q_vector->irq.index = -ENOENT;

	if (vsi->type == ICE_VSI_VF) {
		q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
		goto out;
	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
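		/* VF control VSIs share an interrupt; if another VF's control
		 * VSI already has one allocated, reuse its first vector's IRQ
		 * instead of allocating a new one
		 */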
		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

		if (ctrl_vsi) {
			if (unlikely(!ctrl_vsi->q_vectors)) {
				err = -ENOENT;
				goto err_free_q_vector;
			}

			q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
			goto skip_alloc;
		}
	}

	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
	if (q_vector->irq.index < 0) {
		err = -ENOMEM;
		goto err_free_q_vector;
	}

skip_alloc:
	q_vector->reg_idx = q_vector->irq.index;

	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;

err_free_q_vector:
	kfree(q_vector);

	return err;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
				   NULL);
		tx_ring->q_vector = NULL;
	}
	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
				   NULL);
		rx_ring->q_vector = NULL;
	}

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	/* release MSIX interrupt if q_vector had interrupt allocated */
	if (q_vector->irq.index < 0)
		goto free_q_vector;

	/* only free last VF ctrl vsi interrupt */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
	    ice_get_vf_ctrl_vsi(pf, vsi))
		goto free_q_vector;

	ice_free_irq(pf, q_vector->irq);

free_q_vector:
	kfree(q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
		return;

	regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

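	/* rings in a channel are indexed relative to the channel's base queue */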
	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* Subtract the queue offset of the TC that the ring belongs to from
	 * the ring's absolute queue index; the result is the queue's index
	 * within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}

/**
 * ice_eswitch_calc_txq_handle - calculate a unique Tx queue handle
 * @ring: pointer to the ring whose unique index is needed
 *
 * To work correctly with many netdevs, ring->q_index of Tx rings on a
 * switchdev VSI can repeat, but hardware ring setup requires a unique
 * q_index. Calculate it here by finding the index of this ring in
 * vsi->tx_rings.
 *
 * Returns ICE_INVAL_Q_INDEX when the index is not found. This should never
 * happen, because the VSI is taken from ring->vsi, so the ring has to be
 * present in this VSI.
 */
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	const struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}

/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	return 0;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* the Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
	 * and it needs to remain 1 for non-DVM capable configurations to not
	 * break backward compatibility for VF drivers. Setting this field to 0
	 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
	 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
	 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
	 * check for the tag
	 */
	if (ice_is_dvm_ena(hw))
		if (vsi->type == ICE_VSI_VF &&
		    ice_vf_is_port_vlan_ena(vsi->vf))
			rlan_ctx.l2tsel = 1;
		else
			rlan_ctx.l2tsel = 0;
	else
		rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 * increasing context priority to pick up profile ID; default is 0x01;
	 * setting to 0x03 to ensure profile is programmed if prev context is
	 * of same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}

static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
{
	void *ctx_ptr = &ring->pkt_ctx;
	struct xsk_cb_desc desc = {};

	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
	desc.src = &ctx_ptr;
	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
		   sizeof(struct xdp_buff);
	desc.bytes = sizeof(ctx_ptr);
	xsk_pool_fill_cb(ring->xsk_pool, &desc);
}

/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
		}

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
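			/* the zero-copy pool dictates the Rx frame size, so
			 * re-register the rxq info with the pool's frame size
			 */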
			xdp_rxq_info_unreg(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
			ice_xsk_pool_fill_cb(ring);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
							 ring->q_index,
							 ring->q_vector->napi.napi_id,
							 ring->rx_buf_len);
				if (err)
					return err;
			}

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
	ring->xdp.data = NULL;
	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -EINVAL;

	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
	}
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error.
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	ice_for_each_rxq(vsi, i) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the requested state is
 * not reached after multiple retries; otherwise returns 0 on success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining rings count to the VSI's queue count */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

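	/* each vector gets DIV_ROUND_UP(remaining, vectors left) rings, so
	 * Tx and Rx rings are spread across vectors as evenly as possible
	 */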
	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);

	vsi->num_q_vectors = 0;
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the transmit
	 * comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
			   u16 q_idx)
{
	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Return 0 on success and a negative value on error.
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	int err = 0;
	u16 q_idx;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error.
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error.
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	if (ret)
		return ret;

	ice_for_each_rxq(vsi, i)
		ice_tx_xsk_pool(vsi, i);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
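		/* the paired XDP Tx queue sits num_xdp_txq entries further in
		 * txq_map; program the same cause/vector for it
		 */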
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}