// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>

static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);

	return min_page_shift ? : 12;
}

u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);

	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
		min_page_shift = req_page_shift;

	return max(req_page_shift, min_page_shift);
}

enum mlx5e_mpwrq_umr_mode
mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	/* Different memory management schemes use different mechanisms to map
	 * user-mode memory. The stricter the guarantees we have, the faster
	 * the mechanisms we can use:
	 * 1. MTT - direct mapping in page granularity.
	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
	 *    all mappings have the same size.
	 * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
	 *    mappings can have different sizes.
	 */
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	bool unaligned = xsk ? xsk->unaligned : false;
	bool oversized = false;

	if (xsk) {
		oversized = xsk->chunk_size < (1 << page_shift);
		WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
	}

	/* XSK frame size doesn't match the UMR page size, either because the
	 * frame size is not a power of two, or it's smaller than the minimal
	 * page size supported by the firmware.
	 * It's possible to receive packets bigger than MTU in certain setups.
	 * To avoid writing over the XSK frame boundary, the top region of each
	 * stride is mapped to a garbage page, resulting in two mappings of
	 * different sizes per frame.
	 */
	if (oversized) {
		/* An optimization for frame sizes equal to 3 * power_of_two.
		 * 3 KSMs point to the frame, and one KSM points to the garbage
		 * page, which works faster than KLM.
		 */
		if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
			return MLX5E_MPWRQ_UMR_MODE_TRIPLE;

		return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
	}

	/* XSK frames can start at arbitrary unaligned locations, but they all
	 * have the same size, which is a power of two. This allows optimizing
	 * to one KSM per frame.
	 */
	if (unaligned)
		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;

	/* XSK: frames are naturally aligned, MTT can be used.
	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
	 * mappings are naturally aligned.
	 */
	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
}

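/* Illustrative examples of the mode selection above (assuming a 4KB firmware
 * minimum page size; values chosen for clarity, not from any specific device):
 * - Regular RQ, or XSK with aligned 4KB chunks: MTT is used (ALIGNED).
 * - XSK with 4KB chunks in an unaligned UMEM: one KSM per frame (UNALIGNED).
 * - XSK with 2KB chunks: the frame is smaller than the UMR page, so each
 *   frame needs a real mapping plus a differently sized garbage-page
 *   mapping, hence KLM (OVERSIZED).
 * - XSK with 3KB chunks (3 * 1024): three equal KSMs cover the frame and a
 *   fourth KSM points to the garbage page (TRIPLE).
 */
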
u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
{
	switch (mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return sizeof(struct mlx5_mtt);
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return sizeof(struct mlx5_ksm);
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		return sizeof(struct mlx5_klm) * 2;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		return sizeof(struct mlx5_ksm) * 4;
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
	return 0;
}

u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u8 max_pages_per_wqe, max_log_mpwqe_size;
	u16 max_wqe_size;

	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;

	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);

	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
}

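/* Worked example (illustrative only, all sizes assumed): if the max aligned
 * SQ WQE is 256 bytes and the fixed UMR WQE header occupies 64 of them, the
 * remaining 192 bytes already satisfy the 64-byte UMR flex alignment and can
 * hold 24 8-byte MTT entries. With a 4KB page_shift that gives
 * ilog2(24) + 12 = 16, i.e. a 64KB MPWQE, which is then clamped to
 * MLX5_MPWRQ_MAX_LOG_WQE_SZ if needed.
 */
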
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			     enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
	u8 pages_per_wqe;

	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;

	/* Two MTTs are needed to form an octword. The number of MTTs is encoded
	 * in octwords in a UMR WQE, so we need at least two to avoid mapping
	 * garbage addresses.
	 */
	if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		pages_per_wqe = 2;

	/* Sanity check for further calculations to succeed. */
	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;

	return pages_per_wqe;
}

u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			   enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u16 umr_wqe_sz;

	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);

	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);

	return umr_wqe_sz;
}

u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
			    MLX5_SEND_WQE_BB);
}

u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			    enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);

	/* Add another page as a buffer between WQEs. This page will absorb
	 * write overflow by the hardware, when receiving packets larger than
	 * MTU. These oversize packets are dropped by the driver at a later
	 * stage.
	 */
	return ALIGN(pages_per_wqe + 1,
		     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
}

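/* Example (illustrative): in ALIGNED mode the UMR entry is an 8-byte MTT, so
 * MLX5_SEND_WQE_BB / 8 = 8 entries fit in one WQEBB. With, say, 16 pages per
 * WQE, 16 + 1 (garbage page) = 17 is rounded up to 24 MTTs so that the UMR
 * WQE payload ends on a WQEBB boundary.
 */
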
u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
				enum mlx5e_mpwrq_umr_mode umr_mode)
{
	/* Same limits apply to KSMs and KLMs. */
	u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
			    1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));

	switch (umr_mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return MLX5E_MAX_RQ_NUM_MTTS;
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return klm_limit;
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		/* Each entry is two KLMs. */
		return klm_limit / 2;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		/* Each entry is four KSMs. */
		return klm_limit / 4;
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
	return 0;
}

static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
				      enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);

	return ilog2(max_entries / mtts_per_wqe);
}

u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
			       enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		MLX5E_ORDER2_MAX_PACKET_MTU;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (params->xdp_prog)
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

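/* Example (illustrative): with an XDP program attached and assuming
 * NET_IP_ALIGN == 2, the headroom is 2 + XDP_PACKET_HEADROOM (256) = 258
 * bytes; without XDP it is NET_IP_ALIGN + MLX5_RX_HEADROOM. XSK RQs use the
 * headroom requested by the UMEM unchanged.
 */
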
static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	return xsk->headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 headroom;

	if (no_head_tail_room)
		return SKB_DATA_ALIGN(hw_mtu);
	headroom = mlx5e_get_linear_rq_headroom(params, NULL);

	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk,
					 bool mpwqe)
{
	bool no_head_tail_room;
	u32 sz;

	/* XSK frames are mapped as individual pages, because frames may come in
	 * an arbitrary order from random locations in the UMEM.
	 */
	if (xsk)
		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;

	no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);

	/* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
	 * no_head_tail_room should be set in the case of XDP with Striding RQ
	 * when SKB is not linear. This is because another page is allocated for the linear part.
	 */
	sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));

	/* XDP in mlx5e doesn't support multiple packets per page.
	 * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
	 */
	return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
}

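/* Example (illustrative): for a 1500-byte MTU without XSK, the linear SKB
 * size (headroom + HW MTU + shared-info overhead) typically rounds up to a
 * 2048-byte stride; with an XDP program attached the stride is then raised
 * to PAGE_SIZE, because XDP doesn't support multiple packets per page.
 */
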
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
				       struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		order_base_2(linear_stride_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
		return false;

	/* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
	 * to exclude headroom and tailroom from calculations.
	 * no_head_tail_room is true when an SKB is built on XDP_PASS on XSK RQs,
	 * since packet data buffers don't have headroom and tailroom reserved for the SKB.
	 * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
	 * must fit into a CPU page.
	 */
	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
		return false;

	/* XSK frames must be big enough to hold the packet data. */
	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
		return false;

	return true;
}

static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  u8 log_stride_sz, u8 log_num_strides,
					  u8 page_shift,
					  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	if (log_stride_sz + log_num_strides !=
	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params,
					  struct mlx5e_xsk_param *xsk)
{
	u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
	u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
					     log_wqe_num_of_strides,
					     page_shift, umr_mode);
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 log_num_strides;
	u8 log_stride_sz;
	u8 log_wqe_sz;

	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);

	if (log_wqe_sz < log_stride_sz)
		return false;

	log_num_strides = log_wqe_sz - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
					     log_num_strides, page_shift,
					     umr_mode);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;

	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
	 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
	 * frame size not equal to PAGE_SIZE.
	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
	 * unexpected failure.
	 */
	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
		return max_log_rq_size;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{
	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{
	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
			 PAGE_SIZE;

	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

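/* Example (illustrative, assuming a 64KB reservation and a 1500-byte MTU):
 * DIV_ROUND_UP(65536, 1500) = 44, and order_base_2(44) = 6, i.e. up to 64
 * packets may be packed into one SHAMPO reservation.
 */
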
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));

	/* XDP in mlx5e doesn't support multiple packets per page. */
	if (params->xdp_prog)
		return PAGE_SHIFT;

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 log_wqe_size, log_stride_size;

	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	WARN(log_wqe_size < log_stride_size,
	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
	return log_wqe_size - log_stride_size;
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_ktls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum cacheline-aligned WQE,
		 * and on top of that all the normal stop room can be taken if
		 * a new packet breaks the active MPWQE session and allocates
		 * its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_mpwqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

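/* Example (illustrative): a 100 Gbps port behind a PCIe link that provides
 * roughly 32 Gbps of usable bandwidth is flagged as slow, since
 * 100000 > MLX5E_SLOW_PCI_RATIO * 32000 (both values expressed in Mbps).
 */
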
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u16 max_mtu_pkts;

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
			      page_shift, umr_mode);
		return -EOPNOTSUPP;
	}

	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
		return -EINVAL;
	}

	/* Current RQ length is too big for the given frame size, the
	 * needed number of WQEs exceeds the maximum.
	 */
	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
	if (params->log_rq_mtu_frames > max_mtu_pkts) {
		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
		return -EINVAL;
	}

	return 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.netdev = c->netdev,
		.wq = c->priv->wq,
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->vec_ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
	if (xdp)
		/* XDP requires all fragments to be of the same size. */
		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}

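/* Example (illustrative, with MLX5E_MAX_RX_FRAGS == 4 and 2048-byte
 * fragments): with XDP the maximum is first_frag_size + 3 * 2048, where the
 * first fragment is smaller because it also carries headroom and SKB
 * overhead; without XDP the last fragment may grow to PAGE_SIZE, giving
 * first_frag_size + 2 * 2048 + PAGE_SIZE.
 */
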
static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
					     struct mlx5e_rq_frags_info *info)
{
	u16 bulk_bound_rq_size = (1 << params->log_rq_mtu_frames) / 4;
	u32 bulk_bound_rq_size_in_bytes;
	u32 sum_frag_strides = 0;
	u32 wqe_bulk_in_bytes;
	u16 split_factor;
	u32 wqe_bulk;
	int i;

	for (i = 0; i < info->num_frags; i++)
		sum_frag_strides += info->arr[i].frag_stride;

	/* For MTUs larger than PAGE_SIZE, align to PAGE_SIZE to reflect the
	 * amount of consumed pages per wqe in bytes.
	 */
	if (sum_frag_strides > PAGE_SIZE)
		sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);

	bulk_bound_rq_size_in_bytes = bulk_bound_rq_size * sum_frag_strides;

#define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)

	/* A WQE bulk should not exceed min(512KB, 1/4 of rq size). For XDP
	 * keep bulk size smaller to avoid filling the page_pool cache on
	 * every bulk refill.
	 */
	wqe_bulk_in_bytes = min_t(u32, MAX_WQE_BULK_BYTES(params->xdp_prog),
				  bulk_bound_rq_size_in_bytes);
	wqe_bulk = DIV_ROUND_UP(wqe_bulk_in_bytes, sum_frag_strides);

	/* Make sure that allocations don't start when the page is still used
	 * by older WQEs.
	 */
	info->wqe_bulk = max_t(u16, info->wqe_index_mask + 1, wqe_bulk);

	split_factor = DIV_ROUND_UP(MAX_WQE_BULK_BYTES(params->xdp_prog),
				    PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
	info->refill_unit = DIV_ROUND_UP(info->wqe_bulk, split_factor);
}

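/* Example (illustrative): with one 4KB page consumed per WQE and a 1024-WQE
 * RQ, the bulk is bounded both by 256 WQEs (1/4 of the RQ, i.e. 1MB) and by
 * 512KB without XDP, so wqe_bulk = 524288 / 4096 = 128 WQEs; the refill unit
 * then splits that bulk so a single refill stays around one page_pool cache
 * worth of pages.
 */
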
#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info,
				     u32 *xdp_frag_size)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i;

	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;

		/* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
		 * first WQE in the page is responsible for allocation of this
		 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
		 * still not completed, the allocation must stop before k*N.
		 */
		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;

		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
					  params->xdp_prog);
	if (byte_count > max_mtu || params->xdp_prog) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
						  params->xdp_prog);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (params->xdp_prog) {
			/* XDP multi buffer expects fragments of the same size. */
			info->arr[i].frag_stride = frag_size_max;
		} else {
			if (i == 0) {
				/* Ensure that headroom and tailroom are included. */
				frag_size += headroom;
				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			}
			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		}

		i++;
	}
	info->num_frags = i;

	/* The last fragment of WQE with index 2*N may share the page with the
	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
	 * is not completed yet, WQE 2*N must not be allocated, as it's
	 * responsible for allocating a new page.
	 */
	if (frag_size_max == PAGE_SIZE) {
		/* No WQE can start in the middle of a page. */
		info->wqe_index_mask = 0;
	} else {
		/* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
		 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
		 */
		WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);

		/* Odd number of fragments allows to pack the last fragment of
		 * the previous WQE and the first fragment of the next WQE into
		 * the same page.
		 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
		 * is 4, the last fragment can be bigger than the rest only if
		 * it's the fourth one, so WQEs consisting of 3 fragments will
		 * always share a page.
		 * When a page is shared, WQE bulk size is 2, otherwise just 1.
		 */
		info->wqe_index_mask = info->num_frags % 2;
	}

out:
	/* Bulking optimization to skip allocation until a large enough number
	 * of WQEs can be allocated in a row. Bulking also influences how well
	 * deferred page release works.
	 */
	mlx5e_rx_compute_wqe_bulk_params(params, info);

	mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
		      __func__, info->wqe_bulk, info->refill_unit);

	info->log_num_frags = order_base_2(info->num_frags);

	*xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;

	return 0;
}

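/* Example (illustrative): a 9000-byte MTU without XDP is split into a first
 * fragment capped by first_frag_size_max, middle fragments of
 * DEFAULT_FRAG_SIZE (2048), and a larger final fragment holding the
 * remainder; fragment strides are rounded up to a power of two, and the
 * first stride additionally accounts for headroom and struct skb_shared_info.
 */
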
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;

	/* +1 is for the case when the packets per reservation don't consume
	 * the whole reservation, so we get a filler CQE for the rest of the
	 * reservation.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_compression_layout,
			 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
			 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
			 MLX5_CQE_COMPRESS_LAYOUT_BASIC);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides,
						   page_shift, umr_mode)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
				      log_wqe_stride_size, log_wqe_num_of_strides,
				      umr_mode);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			MLX5_SET(wq, wq, shampo_enable, true);
			MLX5_SET(wq, wq, log_reservation_size,
				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
			MLX5_SET(wq, wq,
				 log_max_num_of_packets_per_reservation,
				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
			MLX5_SET(wq, wq, log_headers_entry_size,
				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
			MLX5_SET(rqc, rqc, reservation_timeout,
				 params->packet_merge.timeout);
			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
				 params->packet_merge.shampo.match_criteria_type);
			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
				 params->packet_merge.shampo.alignment_granularity);
		}
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
						&param->xdp_frag_size);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and
 * on the restriction that the maximum number of packets per reservation is
 * equal to the maximum number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int wqe_size = BIT(log_stride_sz) * num_strides;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
	return hd_per_wqe;
}

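/* Example (illustrative): with a 256KB WQE, a 64KB reservation and 64 packets
 * per reservation, hd_per_wqe = (262144 / 65536) * 64 = 256 header entries.
 */
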
/* This function calculates the maximum number of header entries that are
 * needed for the WQ. This value is used to allocate the header buffer in HW,
 * thus it must be a power of two.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;
	return wqebbs;
}

static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 umr_wqebbs;

	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);

	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs, total_pages, useful_space;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	/* UMR WQEs for the regular RQ. */
	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
	 * both regular RQ and XSK RQ.
	 *
	 * XSK uses different values of page_shift, and the total number of UMR
	 * WQEBBs depends on it. This dependency is complex and not monotonic,
	 * especially taking into consideration that some of the parameters come
	 * from capabilities. Hence, we have to try all valid values of XSK
	 * frame size (and page_shift) to find the maximum.
	 */
	if (params->xdp_prog) {
		u32 max_xsk_wqebbs = 0;
		u8 frame_shift;

		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
		     frame_shift <= PAGE_SHIFT; frame_shift++) {
			/* The headroom doesn't affect the calculation. */
			struct mlx5e_xsk_param xsk = {
				.chunk_size = 1 << frame_shift,
				.unaligned = false,
			};

			/* XSK aligned mode. */
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a power of two. */
			xsk.unaligned = true;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is not equal to stride size. */
			xsk.chunk_size -= 1;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a triple power of two. */
			xsk.chunk_size = (1 << frame_shift) / 4 * 3;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
		}

		wqebbs += max_xsk_wqebbs;
	}

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

	/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
	 * This padding is always smaller than the max WQE size. That gives us
	 * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
	 * per page. The number of pages is estimated as the total size of WQEs
	 * divided by the useful space in page, rounding up. If some WQEs don't
	 * fully fit into the useful space, they can occupy part of the padding,
	 * which proves this estimation to be correct (reserve enough space).
	 */
	useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
	total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
	wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);

	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

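/* Example (illustrative, following the bytes formula stated in the comment
 * above): with 4KB pages and an assumed 256-byte max WQE, each page offers at
 * least 4096 - (256 - 64) = 3904 useful bytes, i.e. 61 WQEBBs, and the total
 * UMR WQEBB count is scaled up so that NOP padding at page boundaries can
 * never overflow the ICOSQ.
 */
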
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}