1 | /* |
2 | * Copyright (c) 2018, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #include <linux/bpf_trace.h> |
34 | #include <net/xdp_sock_drv.h> |
35 | #include "en/xdp.h" |
36 | #include "en/params.h" |
37 | #include <linux/bitfield.h> |
38 | #include <net/page_pool/helpers.h> |
39 | |
40 | int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) |
41 | { |
42 | int hr = mlx5e_get_linear_rq_headroom(params, xsk); |
43 | |
44 | /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). |
45 | * The condition checked in mlx5e_rx_is_linear_skb is: |
46 | * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) |
47 | * (Note that hw_mtu == sw_mtu + hard_mtu.) |
48 | * What is returned from this function is: |
49 | * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) |
50 | * After assigning sw_mtu := max_mtu, the left side of (1) turns to |
51 | * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, |
52 | * because both PAGE_SIZE and S are already aligned. Any number greater |
53 | * than max_mtu would make the left side of (1) greater than PAGE_SIZE, |
54 | * so max_mtu is the maximum MTU allowed. |
55 | */ |
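/* Illustrative arithmetic (the numbers below are assumptions chosen for the
 * example, not taken from any particular configuration): with PAGE_SIZE = 4096,
 * S = 320 and hr + hard_mtu = 256, the value returned here would be
 * 4096 - 320 - 256 = 3520.
 */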
56 | |
57 | return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); |
58 | } |
59 | |
60 | static inline bool |
61 | mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, |
62 | struct xdp_buff *xdp) |
63 | { |
64 | struct page *page = virt_to_page(xdp->data); |
65 | struct mlx5e_xmit_data_frags xdptxdf = {}; |
66 | struct mlx5e_xmit_data *xdptxd; |
67 | struct xdp_frame *xdpf; |
68 | dma_addr_t dma_addr; |
69 | int i; |
70 | |
71 | xdpf = xdp_convert_buff_to_frame(xdp); |
72 | if (unlikely(!xdpf)) |
73 | return false; |
74 | |
75 | xdptxd = &xdptxdf.xd; |
76 | xdptxd->data = xdpf->data; |
77 | xdptxd->len = xdpf->len; |
78 | xdptxd->has_frags = xdp_frame_has_frags(xdpf); |
79 | |
80 | if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) { |
81 | /* The xdp_buff was in the UMEM and was copied into a newly |
82 | * allocated page. The UMEM page was returned via the ZCA, and |
83 | * this new page has to be mapped at this point and has to be |
84 | * unmapped and returned via xdp_return_frame on completion. |
85 | */ |
86 | |
87 | /* Prevent double recycling of the UMEM page. Even in case this |
88 | * function returns false, the xdp_buff shouldn't be recycled, |
89 | * as it was already done in xdp_convert_zc_to_xdp_frame. |
90 | */ |
91 | __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ |
92 | |
93 | if (unlikely(xdptxd->has_frags)) |
94 | return false; |
95 | |
96 | dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len, |
97 | DMA_TO_DEVICE); |
98 | if (dma_mapping_error(sq->pdev, dma_addr)) { |
99 | xdp_return_frame(xdpf); |
100 | return false; |
101 | } |
102 | |
103 | xdptxd->dma_addr = dma_addr; |
104 | |
105 | if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, |
106 | mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL))) |
107 | return false; |
108 | |
109 | /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */ |
110 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
111 | (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME }); |
112 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
113 | (union mlx5e_xdp_info) { .frame.xdpf = xdpf }); |
114 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
115 | (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr }); |
116 | return true; |
117 | } |
118 | |
119 | /* The driver assumes that xdp_convert_buff_to_frame returns an xdp_frame |
120 | * that points to the same memory region as the original xdp_buff. This |
121 | * allows the memory to be mapped only once and used in DMA_BIDIRECTIONAL |
122 | * mode. |
123 | */ |
124 | |
125 | dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf); |
126 | dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL); |
127 | |
128 | if (xdptxd->has_frags) { |
129 | xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf); |
130 | xdptxdf.dma_arr = NULL; |
131 | |
132 | for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) { |
133 | skb_frag_t *frag = &xdptxdf.sinfo->frags[i]; |
134 | dma_addr_t addr; |
135 | u32 len; |
136 | |
137 | addr = page_pool_get_dma_addr(skb_frag_page(frag)) + |
138 | skb_frag_off(frag); |
139 | len = skb_frag_size(frag); |
140 | dma_sync_single_for_device(sq->pdev, addr, len, |
141 | DMA_BIDIRECTIONAL); |
142 | } |
143 | } |
144 | |
145 | xdptxd->dma_addr = dma_addr; |
146 | |
147 | if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, |
148 | mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL))) |
149 | return false; |
150 | |
151 | /* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */ |
152 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
153 | (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE }); |
154 | |
155 | if (xdptxd->has_frags) { |
156 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
157 | (union mlx5e_xdp_info) |
158 | { .page.num = 1 + xdptxdf.sinfo->nr_frags }); |
159 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
160 | (union mlx5e_xdp_info) { .page.page = page }); |
161 | for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) { |
162 | skb_frag_t *frag = &xdptxdf.sinfo->frags[i]; |
163 | |
164 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
165 | (union mlx5e_xdp_info) |
166 | { .page.page = skb_frag_page(frag) }); |
167 | } |
168 | } else { |
169 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
170 | (union mlx5e_xdp_info) { .page.num = 1 }); |
171 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
172 | (union mlx5e_xdp_info) { .page.page = page }); |
173 | } |
174 | |
175 | return true; |
176 | } |
177 | |
178 | static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) |
179 | { |
180 | const struct mlx5e_xdp_buff *_ctx = (void *)ctx; |
181 | |
182 | if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp))) |
183 | return -ENODATA; |
184 | |
185 | *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time, |
186 | _ctx->rq->clock, get_cqe_ts(_ctx->cqe)); |
187 | return 0; |
188 | } |
189 | |
190 | /* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4-bits*/ |
191 | #define RSS_TYPE_MAX_TABLE 16 /* 4-bits max 16 entries */ |
192 | #define RSS_L4 GENMASK(1, 0) |
193 | #define RSS_L3 GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */ |
194 | |
195 | /* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 sorted numerical */ |
196 | enum { |
197 | RSS_TYPE_NO_HASH = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
198 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)), |
199 | RSS_TYPE_L3_IPV4 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
200 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)), |
201 | RSS_TYPE_L4_IPV4_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
202 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)), |
203 | RSS_TYPE_L4_IPV4_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
204 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)), |
205 | RSS_TYPE_L4_IPV4_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
206 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)), |
207 | RSS_TYPE_L3_IPV6 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
208 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)), |
209 | RSS_TYPE_L4_IPV6_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
210 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)), |
211 | RSS_TYPE_L4_IPV6_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
212 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)), |
213 | RSS_TYPE_L4_IPV6_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
214 | FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)), |
215 | }; |
216 | |
217 | /* Invalid combinations simply return zero, so no boundary checks are needed */ |
218 | static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = { |
219 | [RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_NONE, |
220 | [1] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
221 | [2] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
222 | [3] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
223 | [RSS_TYPE_L3_IPV4] = XDP_RSS_TYPE_L3_IPV4, |
224 | [RSS_TYPE_L4_IPV4_TCP] = XDP_RSS_TYPE_L4_IPV4_TCP, |
225 | [RSS_TYPE_L4_IPV4_UDP] = XDP_RSS_TYPE_L4_IPV4_UDP, |
226 | [RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC, |
227 | [RSS_TYPE_L3_IPV6] = XDP_RSS_TYPE_L3_IPV6, |
228 | [RSS_TYPE_L4_IPV6_TCP] = XDP_RSS_TYPE_L4_IPV6_TCP, |
229 | [RSS_TYPE_L4_IPV6_UDP] = XDP_RSS_TYPE_L4_IPV6_UDP, |
230 | [RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC, |
231 | [12] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
232 | [13] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
233 | [14] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
234 | [15] = XDP_RSS_TYPE_NONE, /* Implicit zero */ |
235 | }; |
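/* Worked example (hypothetical CQE contents, for illustration only): a CQE
 * carrying CQE_RSS_IPV4 in CQE_RSS_HTYPE_IP and CQE_RSS_L4_TCP in
 * CQE_RSS_HTYPE_L4 produces lookup == RSS_TYPE_L4_IPV4_TCP in
 * mlx5e_xdp_rx_hash() below, so the table resolves it to
 * XDP_RSS_TYPE_L4_IPV4_TCP. Combinations not named in the enum above land in
 * an implicit-zero slot and report XDP_RSS_TYPE_NONE.
 */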
236 | |
237 | static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, |
238 | enum xdp_rss_hash_type *rss_type) |
239 | { |
240 | const struct mlx5e_xdp_buff *_ctx = (void *)ctx; |
241 | const struct mlx5_cqe64 *cqe = _ctx->cqe; |
242 | u32 hash_type, l4_type, ip_type, lookup; |
243 | |
244 | if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))) |
245 | return -ENODATA; |
246 | |
247 | *hash = be32_to_cpu(cqe->rss_hash_result); |
248 | |
249 | hash_type = cqe->rss_hash_type; |
250 | BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */ |
251 | ip_type = hash_type & CQE_RSS_HTYPE_IP; |
252 | l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type); |
253 | lookup = ip_type | l4_type; |
254 | *rss_type = mlx5_xdp_rss_type[lookup]; |
255 | |
256 | return 0; |
257 | } |
258 | |
259 | static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, |
260 | u16 *vlan_tci) |
261 | { |
262 | const struct mlx5e_xdp_buff *_ctx = (void *)ctx; |
263 | const struct mlx5_cqe64 *cqe = _ctx->cqe; |
264 | |
265 | if (!cqe_has_vlan(cqe)) |
266 | return -ENODATA; |
267 | |
268 | *vlan_proto = htons(ETH_P_8021Q); |
269 | *vlan_tci = be16_to_cpu(cqe->vlan_info); |
270 | return 0; |
271 | } |
272 | |
273 | const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = { |
274 | .xmo_rx_timestamp = mlx5e_xdp_rx_timestamp, |
275 | .xmo_rx_hash = mlx5e_xdp_rx_hash, |
276 | .xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag, |
277 | }; |
278 | |
279 | struct mlx5e_xsk_tx_complete { |
280 | struct mlx5_cqe64 *cqe; |
281 | struct mlx5e_cq *cq; |
282 | }; |
283 | |
284 | static u64 mlx5e_xsk_fill_timestamp(void *_priv) |
285 | { |
286 | struct mlx5e_xsk_tx_complete *priv = _priv; |
287 | u64 ts; |
288 | |
289 | ts = get_cqe_ts(priv->cqe); |
290 | |
291 | if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev)) |
292 | return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts); |
293 | |
294 | return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts); |
295 | } |
296 | |
297 | static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv) |
298 | { |
299 | struct mlx5_wqe_eth_seg *eseg = priv; |
300 | |
301 | /* HW/FW is doing parsing, so offsets are largely ignored. */ |
302 | eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; |
303 | } |
304 | |
305 | const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = { |
306 | .tmo_fill_timestamp = mlx5e_xsk_fill_timestamp, |
307 | .tmo_request_checksum = mlx5e_xsk_request_checksum, |
308 | }; |
309 | |
310 | /* returns true if packet was consumed by xdp */ |
311 | bool mlx5e_xdp_handle(struct mlx5e_rq *rq, |
312 | struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf) |
313 | { |
314 | struct xdp_buff *xdp = &mxbuf->xdp; |
315 | u32 act; |
316 | int err; |
317 | |
318 | act = bpf_prog_run_xdp(prog, xdp); |
319 | switch (act) { |
320 | case XDP_PASS: |
321 | return false; |
322 | case XDP_TX: |
323 | if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp))) |
324 | goto xdp_abort; |
325 | __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ |
326 | return true; |
327 | case XDP_REDIRECT: |
328 | /* When XDP is enabled, the page refcount is 1 here */ |
329 | err = xdp_do_redirect(rq->netdev, xdp, prog); |
330 | if (unlikely(err)) |
331 | goto xdp_abort; |
332 | __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); |
333 | __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); |
334 | rq->stats->xdp_redirect++; |
335 | return true; |
336 | default: |
337 | bpf_warn_invalid_xdp_action(rq->netdev, prog, act); |
338 | fallthrough; |
339 | case XDP_ABORTED: |
340 | xdp_abort: |
341 | trace_xdp_exception(rq->netdev, prog, act); |
342 | fallthrough; |
343 | case XDP_DROP: |
344 | rq->stats->xdp_drop++; |
345 | return true; |
346 | } |
347 | } |
348 | |
349 | static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size) |
350 | { |
351 | struct mlx5_wq_cyc *wq = &sq->wq; |
352 | u16 pi, contig_wqebbs; |
353 | |
354 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
355 | contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
356 | if (unlikely(contig_wqebbs < size)) { |
357 | struct mlx5e_xdp_wqe_info *wi, *edge_wi; |
358 | |
359 | wi = &sq->db.wqe_info[pi]; |
360 | edge_wi = wi + contig_wqebbs; |
361 | |
362 | /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */ |
363 | for (; wi < edge_wi; wi++) { |
364 | *wi = (struct mlx5e_xdp_wqe_info) { |
365 | .num_wqebbs = 1, |
366 | .num_pkts = 0, |
367 | }; |
368 | mlx5e_post_nop(wq, sq->sqn, &sq->pc); |
369 | } |
370 | sq->stats->nops += contig_wqebbs; |
371 | |
372 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
373 | } |
374 | |
375 | return pi; |
376 | } |
377 | |
378 | static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) |
379 | { |
380 | struct mlx5e_tx_mpwqe *session = &sq->mpwqe; |
381 | struct mlx5e_xdpsq_stats *stats = sq->stats; |
382 | struct mlx5e_tx_wqe *wqe; |
383 | u16 pi; |
384 | |
385 | pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); |
386 | wqe = MLX5E_TX_FETCH_WQE(sq, pi); |
387 | net_prefetchw(wqe->data); |
388 | |
389 | *session = (struct mlx5e_tx_mpwqe) { |
390 | .wqe = wqe, |
391 | .bytes_count = 0, |
392 | .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, |
393 | .pkt_count = 0, |
394 | .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on), |
395 | }; |
396 | |
397 | stats->mpwqe++; |
398 | } |
399 | |
400 | void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq) |
401 | { |
402 | struct mlx5_wq_cyc *wq = &sq->wq; |
403 | struct mlx5e_tx_mpwqe *session = &sq->mpwqe; |
404 | struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl; |
405 | u16 ds_count = session->ds_count; |
406 | u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
407 | struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi]; |
408 | |
409 | cseg->opmod_idx_opcode = |
410 | cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW); |
411 | cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count); |
412 | |
413 | wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS); |
414 | wi->num_pkts = session->pkt_count; |
415 | |
416 | sq->pc += wi->num_wqebbs; |
417 | |
418 | sq->doorbell_cseg = cseg; |
419 | |
420 | session->wqe = NULL; /* Close session */ |
421 | } |
422 | |
423 | enum { |
424 | MLX5E_XDP_CHECK_OK = 1, |
425 | MLX5E_XDP_CHECK_START_MPWQE = 2, |
426 | }; |
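/* The check helpers below return one of the values above on success or a
 * negative errno (-EBUSY when the SQ is full) on failure;
 * MLX5E_XDP_CHECK_START_MPWQE tells mlx5e_xmit_xdp_frame_mpwqe() to open a
 * new MPWQE session before adding descriptors.
 */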
427 | |
428 | INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) |
429 | { |
430 | if (unlikely(!sq->mpwqe.wqe)) { |
431 | if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, |
432 | sq->stop_room))) { |
433 | /* SQ is full, ring doorbell */ |
434 | mlx5e_xmit_xdp_doorbell(sq); |
435 | sq->stats->full++; |
436 | return -EBUSY; |
437 | } |
438 | |
439 | return MLX5E_XDP_CHECK_START_MPWQE; |
440 | } |
441 | |
442 | return MLX5E_XDP_CHECK_OK; |
443 | } |
444 | |
445 | INDIRECT_CALLABLE_SCOPE bool |
446 | mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, |
447 | int check_result, struct xsk_tx_metadata *meta); |
448 | |
449 | INDIRECT_CALLABLE_SCOPE bool |
450 | mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, |
451 | int check_result, struct xsk_tx_metadata *meta) |
452 | { |
453 | struct mlx5e_tx_mpwqe *session = &sq->mpwqe; |
454 | struct mlx5e_xdpsq_stats *stats = sq->stats; |
455 | struct mlx5e_xmit_data *p = xdptxd; |
456 | struct mlx5e_xmit_data tmp; |
457 | |
458 | if (xdptxd->has_frags) { |
459 | struct mlx5e_xmit_data_frags *xdptxdf = |
460 | container_of(xdptxd, struct mlx5e_xmit_data_frags, xd); |
461 | |
462 | if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) { |
463 | /* MPWQE is enabled, but a multi-buffer packet is queued for |
464 | * transmission. MPWQE can't send fragmented packets, so close |
465 | * the current session and fall back to a regular WQE. |
466 | */ |
467 | if (unlikely(sq->mpwqe.wqe)) |
468 | mlx5e_xdp_mpwqe_complete(sq); |
469 | return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta); |
470 | } |
471 | if (!xdptxd->len) { |
472 | skb_frag_t *frag = &xdptxdf->sinfo->frags[0]; |
473 | |
474 | tmp.data = skb_frag_address(frag); |
475 | tmp.len = skb_frag_size(frag); |
476 | tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] : |
477 | page_pool_get_dma_addr(skb_frag_page(frag)) + |
478 | skb_frag_off(frag); |
479 | p = &tmp; |
480 | } |
481 | } |
482 | |
483 | if (unlikely(p->len > sq->hw_mtu)) { |
484 | stats->err++; |
485 | return false; |
486 | } |
487 | |
488 | if (!check_result) |
489 | check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq); |
490 | if (unlikely(check_result < 0)) |
491 | return false; |
492 | |
493 | if (check_result == MLX5E_XDP_CHECK_START_MPWQE) { |
494 | /* Start the session when nothing can fail, so it's guaranteed |
495 | * that if there is an active session, it has at least one dseg, |
496 | * and it's safe to complete it at any time. |
497 | */ |
498 | mlx5e_xdp_mpwqe_session_start(sq); |
499 | xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth); |
500 | } |
501 | |
502 | mlx5e_xdp_mpwqe_add_dseg(sq, p, stats); |
503 | |
504 | if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs))) |
505 | mlx5e_xdp_mpwqe_complete(sq); |
506 | |
507 | stats->xmit++; |
508 | return true; |
509 | } |
510 | |
511 | static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room) |
512 | { |
513 | if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) { |
514 | /* SQ is full, ring doorbell */ |
515 | mlx5e_xmit_xdp_doorbell(sq); |
516 | sq->stats->full++; |
517 | return -EBUSY; |
518 | } |
519 | |
520 | return MLX5E_XDP_CHECK_OK; |
521 | } |
522 | |
523 | INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) |
524 | { |
525 | return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1); |
526 | } |
527 | |
528 | INDIRECT_CALLABLE_SCOPE bool |
529 | mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, |
530 | int check_result, struct xsk_tx_metadata *meta) |
531 | { |
532 | struct mlx5e_xmit_data_frags *xdptxdf = |
533 | container_of(xdptxd, struct mlx5e_xmit_data_frags, xd); |
534 | struct mlx5_wq_cyc *wq = &sq->wq; |
535 | struct mlx5_wqe_ctrl_seg *cseg; |
536 | struct mlx5_wqe_data_seg *dseg; |
537 | struct mlx5_wqe_eth_seg *eseg; |
538 | struct mlx5e_tx_wqe *wqe; |
539 | |
540 | dma_addr_t dma_addr = xdptxd->dma_addr; |
541 | u32 dma_len = xdptxd->len; |
542 | u16 ds_cnt, inline_hdr_sz; |
543 | unsigned int frags_size; |
544 | u8 num_wqebbs = 1; |
545 | int num_frags = 0; |
546 | bool inline_ok; |
547 | bool linear; |
548 | u16 pi; |
549 | |
550 | struct mlx5e_xdpsq_stats *stats = sq->stats; |
551 | |
552 | inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE || |
553 | dma_len >= MLX5E_XDP_MIN_INLINE; |
554 | frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0; |
555 | |
556 | if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) { |
557 | stats->err++; |
558 | return false; |
559 | } |
560 | |
561 | inline_hdr_sz = 0; |
562 | if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) |
563 | inline_hdr_sz = MLX5E_XDP_MIN_INLINE; |
564 | |
565 | linear = !!(dma_len - inline_hdr_sz); |
566 | ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz; |
567 | |
568 | /* check_result must be 0 if sinfo is passed. */ |
569 | if (!check_result) { |
570 | int stop_room = 1; |
571 | |
572 | if (xdptxd->has_frags) { |
573 | ds_cnt += xdptxdf->sinfo->nr_frags; |
574 | num_frags = xdptxdf->sinfo->nr_frags; |
575 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
576 | /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big |
577 | * enough to hold all fragments. |
578 | */ |
579 | stop_room = MLX5E_STOP_ROOM(num_wqebbs); |
580 | } |
581 | |
582 | check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room); |
583 | } |
584 | if (unlikely(check_result < 0)) |
585 | return false; |
586 | |
587 | pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs); |
588 | wqe = mlx5_wq_cyc_get_wqe(wq, pi); |
589 | net_prefetchw(wqe); |
590 | |
591 | cseg = &wqe->ctrl; |
592 | eseg = &wqe->eth; |
593 | dseg = wqe->data; |
594 | |
595 | /* copy the inline part if required */ |
596 | if (inline_hdr_sz) { |
597 | memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start)); |
598 | memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start), |
599 | inline_hdr_sz - sizeof(eseg->inline_hdr.start)); |
600 | dma_len -= inline_hdr_sz; |
601 | dma_addr += inline_hdr_sz; |
602 | dseg++; |
603 | } |
604 | |
605 | /* write the dma part */ |
606 | if (linear) { |
607 | dseg->addr = cpu_to_be64(dma_addr); |
608 | dseg->byte_count = cpu_to_be32(dma_len); |
609 | dseg->lkey = sq->mkey_be; |
610 | dseg++; |
611 | } |
612 | |
613 | cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); |
614 | |
615 | if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) { |
616 | int i; |
617 | |
618 | memset(&cseg->trailer, 0, sizeof(cseg->trailer)); |
619 | memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer)); |
620 | |
621 | eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); |
622 | |
623 | for (i = 0; i < num_frags; i++) { |
624 | skb_frag_t *frag = &xdptxdf->sinfo->frags[i]; |
625 | dma_addr_t addr; |
626 | |
627 | addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] : |
628 | page_pool_get_dma_addr(skb_frag_page(frag)) + |
629 | skb_frag_off(frag); |
630 | |
631 | dseg->addr = cpu_to_be64(addr); |
632 | dseg->byte_count = cpu_to_be32(skb_frag_size(frag)); |
633 | dseg->lkey = sq->mkey_be; |
634 | dseg++; |
635 | } |
636 | |
637 | cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); |
638 | |
639 | sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) { |
640 | .num_wqebbs = num_wqebbs, |
641 | .num_pkts = 1, |
642 | }; |
643 | |
644 | sq->pc += num_wqebbs; |
645 | } else { |
646 | cseg->fm_ce_se = 0; |
647 | |
648 | sq->pc++; |
649 | } |
650 | |
651 | xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg); |
652 | |
653 | sq->doorbell_cseg = cseg; |
654 | |
655 | stats->xmit++; |
656 | return true; |
657 | } |
658 | |
659 | static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, |
660 | struct mlx5e_xdp_wqe_info *wi, |
661 | u32 *xsk_frames, |
662 | struct xdp_frame_bulk *bq, |
663 | struct mlx5e_cq *cq, |
664 | struct mlx5_cqe64 *cqe) |
665 | { |
666 | struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; |
667 | u16 i; |
668 | |
669 | for (i = 0; i < wi->num_pkts; i++) { |
670 | union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
671 | |
672 | switch (xdpi.mode) { |
673 | case MLX5E_XDP_XMIT_MODE_FRAME: { |
674 | /* XDP_TX from the XSK RQ and XDP_REDIRECT */ |
675 | struct xdp_frame *xdpf; |
676 | dma_addr_t dma_addr; |
677 | |
678 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
679 | xdpf = xdpi.frame.xdpf; |
680 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
681 | dma_addr = xdpi.frame.dma_addr; |
682 | |
683 | dma_unmap_single(sq->pdev, dma_addr, |
684 | xdpf->len, DMA_TO_DEVICE); |
685 | if (xdp_frame_has_frags(xdpf)) { |
686 | struct skb_shared_info *sinfo; |
687 | int j; |
688 | |
689 | sinfo = xdp_get_shared_info_from_frame(xdpf); |
690 | for (j = 0; j < sinfo->nr_frags; j++) { |
691 | skb_frag_t *frag = &sinfo->frags[j]; |
692 | |
693 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
694 | dma_addr = xdpi.frame.dma_addr; |
695 | |
696 | dma_unmap_single(sq->pdev, dma_addr, |
697 | skb_frag_size(frag), DMA_TO_DEVICE); |
698 | } |
699 | } |
700 | xdp_return_frame_bulk(xdpf, bq); |
701 | break; |
702 | } |
703 | case MLX5E_XDP_XMIT_MODE_PAGE: { |
704 | /* XDP_TX from the regular RQ */ |
705 | u8 num, n = 0; |
706 | |
707 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
708 | num = xdpi.page.num; |
709 | |
710 | do { |
711 | struct page *page; |
712 | |
713 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
714 | page = xdpi.page.page; |
715 | |
716 | /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) |
717 | * as we know this is a page_pool page. |
718 | */ |
719 | page_pool_recycle_direct(page->pp, page); |
720 | } while (++n < num); |
721 | |
722 | break; |
723 | } |
724 | case MLX5E_XDP_XMIT_MODE_XSK: { |
725 | /* AF_XDP send */ |
726 | struct xsk_tx_metadata_compl *compl = NULL; |
727 | struct mlx5e_xsk_tx_complete priv = { |
728 | .cqe = cqe, |
729 | .cq = cq, |
730 | }; |
731 | |
732 | if (xp_tx_metadata_enabled(sq->xsk_pool)) { |
733 | xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); |
734 | compl = &xdpi.xsk_meta; |
735 | |
736 | xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv); |
737 | } |
738 | |
739 | (*xsk_frames)++; |
740 | break; |
741 | } |
742 | default: |
743 | WARN_ON_ONCE(true); |
744 | } |
745 | } |
746 | } |
747 | |
748 | bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) |
749 | { |
750 | struct xdp_frame_bulk bq; |
751 | struct mlx5e_xdpsq *sq; |
752 | struct mlx5_cqe64 *cqe; |
753 | u32 xsk_frames = 0; |
754 | u16 sqcc; |
755 | int i; |
756 | |
757 | xdp_frame_bulk_init(&bq); |
758 | |
759 | sq = container_of(cq, struct mlx5e_xdpsq, cq); |
760 | |
761 | if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) |
762 | return false; |
763 | |
764 | cqe = mlx5_cqwq_get_cqe(&cq->wq); |
765 | if (!cqe) |
766 | return false; |
767 | |
768 | /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), |
769 | * otherwise a cq overrun may occur |
770 | */ |
771 | sqcc = sq->cc; |
772 | |
773 | i = 0; |
774 | do { |
775 | struct mlx5e_xdp_wqe_info *wi; |
776 | u16 wqe_counter, ci; |
777 | bool last_wqe; |
778 | |
779 | mlx5_cqwq_pop(&cq->wq); |
780 | |
781 | wqe_counter = be16_to_cpu(cqe->wqe_counter); |
782 | |
783 | do { |
784 | last_wqe = (sqcc == wqe_counter); |
785 | ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); |
786 | wi = &sq->db.wqe_info[ci]; |
787 | |
788 | sqcc += wi->num_wqebbs; |
789 | |
790 | mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe); |
791 | } while (!last_wqe); |
792 | |
793 | if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { |
794 | netdev_WARN_ONCE(sq->channel->netdev, |
795 | "Bad OP in XDPSQ CQE: 0x%x\n", |
796 | get_cqe_opcode(cqe)); |
797 | mlx5e_dump_error_cqe(&sq->cq, sq->sqn, |
798 | (struct mlx5_err_cqe *)cqe); |
799 | mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); |
800 | } |
801 | } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); |
802 | |
803 | xdp_flush_frame_bulk(&bq); |
804 | |
805 | if (xsk_frames) |
806 | xsk_tx_completed(sq->xsk_pool, xsk_frames); |
807 | |
808 | sq->stats->cqes += i; |
809 | |
810 | mlx5_cqwq_update_db_record(&cq->wq); |
811 | |
812 | /* ensure cq space is freed before enabling more cqes */ |
813 | wmb(); |
814 | |
815 | sq->cc = sqcc; |
816 | return (i == MLX5E_TX_CQ_POLL_BUDGET); |
817 | } |
818 | |
819 | void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) |
820 | { |
821 | struct xdp_frame_bulk bq; |
822 | u32 xsk_frames = 0; |
823 | |
824 | xdp_frame_bulk_init(&bq); |
825 | |
826 | rcu_read_lock(); /* need for xdp_return_frame_bulk */ |
827 | |
828 | while (sq->cc != sq->pc) { |
829 | struct mlx5e_xdp_wqe_info *wi; |
830 | u16 ci; |
831 | |
832 | ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); |
833 | wi = &sq->db.wqe_info[ci]; |
834 | |
835 | sq->cc += wi->num_wqebbs; |
836 | |
837 | mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL); |
838 | } |
839 | |
840 | xdp_flush_frame_bulk(&bq); |
841 | rcu_read_unlock(); |
842 | |
843 | if (xsk_frames) |
844 | xsk_tx_completed(sq->xsk_pool, xsk_frames); |
845 | } |
846 | |
847 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
848 | u32 flags) |
849 | { |
850 | struct mlx5e_priv *priv = netdev_priv(dev); |
851 | struct mlx5e_xdpsq *sq; |
852 | int nxmit = 0; |
853 | int sq_num; |
854 | int i; |
855 | |
856 | /* this flag is sufficient, no need to test internal sq state */ |
857 | if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) |
858 | return -ENETDOWN; |
859 | |
860 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
861 | return -EINVAL; |
862 | |
863 | sq_num = smp_processor_id(); |
864 | |
865 | if (unlikely(sq_num >= priv->channels.num)) |
866 | return -ENXIO; |
867 | |
868 | sq = &priv->channels.c[sq_num]->xdpsq; |
869 | |
870 | for (i = 0; i < n; i++) { |
871 | struct mlx5e_xmit_data_frags xdptxdf = {}; |
872 | struct xdp_frame *xdpf = frames[i]; |
873 | dma_addr_t dma_arr[MAX_SKB_FRAGS]; |
874 | struct mlx5e_xmit_data *xdptxd; |
875 | bool ret; |
876 | |
877 | xdptxd = &xdptxdf.xd; |
878 | xdptxd->data = xdpf->data; |
879 | xdptxd->len = xdpf->len; |
880 | xdptxd->has_frags = xdp_frame_has_frags(xdpf); |
881 | xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data, |
882 | xdptxd->len, DMA_TO_DEVICE); |
883 | |
884 | if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr))) |
885 | break; |
886 | |
887 | if (xdptxd->has_frags) { |
888 | int j; |
889 | |
890 | xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf); |
891 | xdptxdf.dma_arr = dma_arr; |
892 | for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) { |
893 | skb_frag_t *frag = &xdptxdf.sinfo->frags[j]; |
894 | |
895 | dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag), |
896 | skb_frag_size(frag), DMA_TO_DEVICE); |
897 | |
898 | if (!dma_mapping_error(sq->pdev, dma_arr[j])) |
899 | continue; |
900 | /* mapping error */ |
901 | while (--j >= 0) |
902 | dma_unmap_single(sq->pdev, dma_arr[j], |
903 | skb_frag_size(&xdptxdf.sinfo->frags[j]), |
904 | DMA_TO_DEVICE); |
905 | goto out; |
906 | } |
907 | } |
908 | |
909 | ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, |
910 | mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL); |
911 | if (unlikely(!ret)) { |
912 | int j; |
913 | |
914 | dma_unmap_single(sq->pdev, xdptxd->dma_addr, |
915 | xdptxd->len, DMA_TO_DEVICE); |
916 | if (!xdptxd->has_frags) |
917 | break; |
918 | for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) |
919 | dma_unmap_single(sq->pdev, dma_arr[j], |
920 | skb_frag_size(&xdptxdf.sinfo->frags[j]), |
921 | DMA_TO_DEVICE); |
922 | break; |
923 | } |
924 | |
925 | /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */ |
926 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
927 | (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME }); |
928 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
929 | (union mlx5e_xdp_info) { .frame.xdpf = xdpf }); |
930 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
931 | (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr }); |
932 | if (xdptxd->has_frags) { |
933 | int j; |
934 | |
935 | for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) |
936 | mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, |
937 | (union mlx5e_xdp_info) |
938 | { .frame.dma_addr = dma_arr[j] }); |
939 | } |
940 | nxmit++; |
941 | } |
942 | |
943 | out: |
944 | if (sq->mpwqe.wqe) |
945 | mlx5e_xdp_mpwqe_complete(sq); |
946 | |
947 | if (flags & XDP_XMIT_FLUSH) |
948 | mlx5e_xmit_xdp_doorbell(sq); |
949 | |
950 | return nxmit; |
951 | } |
952 | |
953 | void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) |
954 | { |
955 | struct mlx5e_xdpsq *xdpsq = rq->xdpsq; |
956 | |
957 | if (xdpsq->mpwqe.wqe) |
958 | mlx5e_xdp_mpwqe_complete(xdpsq); |
959 | |
960 | mlx5e_xmit_xdp_doorbell(xdpsq); |
961 | |
962 | if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) { |
963 | xdp_do_flush(); |
964 | __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); |
965 | } |
966 | } |
967 | |
968 | void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw) |
969 | { |
970 | sq->xmit_xdp_frame_check = is_mpw ? |
971 | mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check; |
972 | sq->xmit_xdp_frame = is_mpw ? |
973 | mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame; |
974 | } |
975 | |