/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 *    next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
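/* A worked example (assuming the usual 16-byte MLX5_SEND_WQE_DS and a 4-byte
 * struct mlx5_wqe_inline_seg): DIV_ROUND_UP(4 + 255 + 1 + 1 + 16, 16) == 18 DS.
 */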

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					 MLX5E_MAX_TX_INLINE_DS + \
					 MLX5E_MAX_TX_IPSEC_DS + \
					 MAX_SKB_FRAGS + 1, \
					 MLX5_SEND_WQEBB_NUM_DS)
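/* A worked example, assuming a 32-byte empty TX WQE (2 DS), 16-byte DS,
 * 4 DS per 64-byte WQEBB and the common MAX_SKB_FRAGS of 17:
 * DIV_ROUND_UP(2 + 23 + 18 + 17 + 1, 4) == 16 WQEBBs.
 */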

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

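/* INDIRECT_CALL_2() compares @func against the two known conversion helpers
 * and calls the matching one directly, avoiding a costly indirect branch on
 * the CQE timestamp fast path when retpolines are enabled.
 */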
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
	MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
	return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

/* TX */
struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len : 31;
	u32 has_frags : 1;
};

struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;
	struct skb_shared_info *sinfo;
	dma_addr_t *dma_arr;
};

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (u16)(*fifo->pc - *fifo->cc) <= fifo->mask;
}

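/* Free-slot arithmetic below relies on unsigned wraparound: with a ring of
 * size 8, pc == 5 and cc == 2, (u16)(cc - pc) == 0xfffd, and masking by
 * (8 - 1) yields the 5 free slots. The masked value is 0 both when the ring
 * is full and when it is empty, hence the explicit (cc == pc) check.
 */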
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

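/* Reserve room for @size contiguous WQEBBs; if fewer remain before the ring
 * edge, the gap is filled with NOPs so that no WQE wraps across the edge.
 */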
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);

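/* The SHAMPO header ring size (hd_per_wq) is a power of two, which lets the
 * CQE's header entry index be reduced with a mask instead of a modulo.
 */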
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}

struct mlx5e_shampo_umr {
	u16 len;
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}
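/* A typical posting sequence using mlx5e_notify_hw() above (an illustrative
 * sketch, not lifted from a real caller):
 *
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 *	... fill in the WQE, advance sq->pc by its number of WQEBBs ...
 *
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 */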

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	WARN_ON_ONCE(*fifo->pc == *fifo->cc);

	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}
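/* VLAN_HLEN is 4 bytes, so each offset above advances by two 2-byte words. */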

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}
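/* Example: a WQE of 16 WQEBBs needs a stop room of 16 - 1 + 16 == 31 WQEBBs,
 * i.e. MLX5E_STOP_ROOM(16).
 */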

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}

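/* mlx5e_mpw_info ends in a flexible array of frag pages, so its effective
 * element size is only known at runtime; index the array manually with
 * struct_size()/array_size() instead of plain pointer arithmetic.
 */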
static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
#endif /* __MLX5_EN_TXRX_H___ */