1 | /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ |
2 | /* |
3 | * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved. |
4 | * Copyright (c) 2020, Intel Corporation. All rights reserved. |
5 | */ |
6 | |
7 | #ifndef MLX5_IB_H |
8 | #define MLX5_IB_H |
9 | |
10 | #include <linux/kernel.h> |
11 | #include <linux/sched.h> |
12 | #include <rdma/ib_verbs.h> |
13 | #include <rdma/ib_umem.h> |
14 | #include <rdma/ib_smi.h> |
15 | #include <linux/mlx5/driver.h> |
16 | #include <linux/mlx5/cq.h> |
17 | #include <linux/mlx5/fs.h> |
18 | #include <linux/mlx5/qp.h> |
19 | #include <linux/types.h> |
20 | #include <linux/mlx5/transobj.h> |
21 | #include <rdma/ib_user_verbs.h> |
22 | #include <rdma/mlx5-abi.h> |
23 | #include <rdma/uverbs_ioctl.h> |
24 | #include <rdma/mlx5_user_ioctl_cmds.h> |
25 | #include <rdma/mlx5_user_ioctl_verbs.h> |
26 | |
27 | #include "srq.h" |
28 | #include "qp.h" |
29 | #include "macsec.h" |
30 | |
31 | #define mlx5_ib_dbg(_dev, format, arg...) \ |
32 | dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \ |
33 | __LINE__, current->pid, ##arg) |
34 | |
35 | #define mlx5_ib_err(_dev, format, arg...) \ |
36 | dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \ |
37 | __LINE__, current->pid, ##arg) |
38 | |
39 | #define mlx5_ib_warn(_dev, format, arg...) \ |
40 | dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \ |
41 | __LINE__, current->pid, ##arg) |
42 | |
43 | #define mlx5_ib_log(lvl, _dev, format, arg...) \ |
44 | dev_printk(lvl, &(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, \ |
45 | __func__, __LINE__, current->pid, ##arg) |
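
/*
 * Illustrative usage of the logging helpers above (not part of this header's
 * API): each macro prefixes the message with the calling function, line
 * number and current pid, e.g.
 *
 *	mlx5_ib_dbg(dev, "alloc failed, err %d\n", err);
 *	mlx5_ib_warn(dev, "port %u not active\n", port);
 */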
46 | |
47 | #define MLX5_IB_DEFAULT_UIDX 0xffffff |
48 | #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) |
49 | |
50 | static __always_inline unsigned long |
51 | __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits, |
52 | unsigned int pgsz_shift) |
53 | { |
54 | unsigned int largest_pg_shift = |
55 | min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift, |
56 | BITS_PER_LONG - 1); |
57 | |
58 | /* |
59 | * Despite a command allowing it, the device does not support lower than |
60 | * 4k page size. |
61 | */ |
62 | pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift); |
63 | return GENMASK(largest_pg_shift, pgsz_shift); |
64 | } |
65 | |
66 | /* |
67 | * For mkc users, instead of a page_offset the command has a start_iova which |
68 | * specifies both the page_offset and the on-the-wire IOVA |
69 | */ |
70 | #define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \ |
71 | ib_umem_find_best_pgsz(umem, \ |
72 | __mlx5_log_page_size_to_bitmap( \ |
73 | __mlx5_bit_sz(typ, log_pgsz_fld), \ |
74 | pgsz_shift), \ |
75 | iova) |
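
/*
 * Example (illustrative, mirroring typical MR registration code): pick the
 * best page size for an mkey-backed umem; 0 is passed as pgsz_shift because
 * the mkc log_page_size field has no extra shift applied:
 *
 *	page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
 */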
76 | |
77 | static __always_inline unsigned long |
78 | __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits, |
79 | unsigned int offset_shift) |
80 | { |
81 | unsigned int largest_offset_shift = |
82 | min_t(unsigned long, page_offset_bits - 1 + offset_shift, |
83 | BITS_PER_LONG - 1); |
84 | |
85 | return GENMASK(largest_offset_shift, offset_shift); |
86 | } |
87 | |
88 | /* |
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
92 | */ |
93 | unsigned long __mlx5_umem_find_best_quantized_pgoff( |
94 | struct ib_umem *umem, unsigned long pgsz_bitmap, |
95 | unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale, |
96 | unsigned int *page_offset_quantized); |
97 | #define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld, \ |
98 | pgsz_shift, page_offset_fld, \ |
99 | scale, page_offset_quantized) \ |
100 | __mlx5_umem_find_best_quantized_pgoff( \ |
101 | umem, \ |
102 | __mlx5_log_page_size_to_bitmap( \ |
103 | __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \ |
104 | __mlx5_bit_sz(typ, page_offset_fld), \ |
105 | GENMASK(31, order_base_2(scale)), scale, \ |
106 | page_offset_quantized) |
107 | |
108 | #define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld, \ |
109 | pgsz_shift, page_offset_fld, \ |
110 | scale, page_offset_quantized) \ |
111 | __mlx5_umem_find_best_quantized_pgoff( \ |
112 | umem, \ |
113 | __mlx5_log_page_size_to_bitmap( \ |
114 | __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \ |
115 | __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \ |
116 | page_offset_quantized) |
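
/*
 * Worked example (illustrative): with scale = 64 and a chosen page_size of
 * 4096, page_offset must be a multiple of page_size / scale = 64 bytes and
 * the device is given page_offset_quantized = page_offset / 64.  A caller
 * typically looks like:
 *
 *	page_size = mlx5_umem_find_best_quantized_pgoff(
 *		umem, srqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 */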
117 | |
118 | enum { |
119 | MLX5_IB_MMAP_OFFSET_START = 9, |
120 | MLX5_IB_MMAP_OFFSET_END = 255, |
121 | }; |
122 | |
123 | enum { |
124 | MLX5_IB_MMAP_CMD_SHIFT = 8, |
125 | MLX5_IB_MMAP_CMD_MASK = 0xff, |
126 | }; |
127 | |
128 | enum { |
129 | MLX5_RES_SCAT_DATA32_CQE = 0x1, |
130 | MLX5_RES_SCAT_DATA64_CQE = 0x2, |
131 | MLX5_REQ_SCAT_DATA32_CQE = 0x11, |
132 | MLX5_REQ_SCAT_DATA64_CQE = 0x22, |
133 | }; |
134 | |
135 | enum mlx5_ib_mad_ifc_flags { |
136 | MLX5_MAD_IFC_IGNORE_MKEY = 1, |
137 | MLX5_MAD_IFC_IGNORE_BKEY = 2, |
138 | MLX5_MAD_IFC_NET_VIEW = 4, |
139 | }; |
140 | |
141 | enum { |
142 | MLX5_CROSS_CHANNEL_BFREG = 0, |
143 | }; |
144 | |
145 | enum { |
146 | MLX5_CQE_VERSION_V0, |
147 | MLX5_CQE_VERSION_V1, |
148 | }; |
149 | |
150 | enum { |
151 | MLX5_TM_MAX_RNDV_MSG_SIZE = 64, |
152 | MLX5_TM_MAX_SGE = 1, |
153 | }; |
154 | |
155 | enum { |
156 | MLX5_IB_INVALID_UAR_INDEX = BIT(31), |
157 | MLX5_IB_INVALID_BFREG = BIT(31), |
158 | }; |
159 | |
160 | enum { |
161 | MLX5_MAX_MEMIC_PAGES = 0x100, |
162 | MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f, |
163 | }; |
164 | |
165 | enum { |
166 | MLX5_MEMIC_BASE_ALIGN = 6, |
167 | MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN, |
168 | }; |
169 | |
170 | enum mlx5_ib_mmap_type { |
171 | MLX5_IB_MMAP_TYPE_MEMIC = 1, |
172 | MLX5_IB_MMAP_TYPE_VAR = 2, |
173 | MLX5_IB_MMAP_TYPE_UAR_WC = 3, |
174 | MLX5_IB_MMAP_TYPE_UAR_NC = 4, |
175 | MLX5_IB_MMAP_TYPE_MEMIC_OP = 5, |
176 | }; |
177 | |
178 | struct mlx5_bfreg_info { |
179 | u32 *sys_pages; |
180 | int num_low_latency_bfregs; |
181 | unsigned int *count; |
182 | |
183 | /* |
184 | * protect bfreg allocation data structs |
185 | */ |
186 | struct mutex lock; |
187 | u32 ver; |
188 | u8 lib_uar_4k : 1; |
189 | u8 lib_uar_dyn : 1; |
190 | u32 num_sys_pages; |
191 | u32 num_static_sys_pages; |
192 | u32 total_num_bfregs; |
193 | u32 num_dyn_bfregs; |
194 | }; |
195 | |
196 | struct mlx5_ib_ucontext { |
197 | struct ib_ucontext ibucontext; |
198 | struct list_head db_page_list; |
199 | |
200 | /* protect doorbell record alloc/free |
201 | */ |
202 | struct mutex db_page_mutex; |
203 | struct mlx5_bfreg_info bfregi; |
204 | u8 cqe_version; |
205 | /* Transport Domain number */ |
206 | u32 tdn; |
207 | |
208 | u64 lib_caps; |
209 | u16 devx_uid; |
210 | /* For RoCE LAG TX affinity */ |
211 | atomic_t tx_port_affinity; |
212 | }; |
213 | |
214 | static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) |
215 | { |
216 | return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext); |
217 | } |
218 | |
219 | struct mlx5_ib_pd { |
220 | struct ib_pd ibpd; |
221 | u32 pdn; |
222 | u16 uid; |
223 | }; |
224 | |
225 | enum { |
226 | , |
227 | MLX5_IB_FLOW_ACTION_PACKET_REFORMAT, |
228 | MLX5_IB_FLOW_ACTION_DECAP, |
229 | }; |
230 | |
231 | #define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1) |
232 | #define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1) |
233 | #if (MLX5_IB_FLOW_LAST_PRIO <= 0) |
234 | #error "Invalid number of bypass priorities" |
235 | #endif |
236 | #define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1) |
237 | |
238 | #define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1) |
239 | #define MLX5_IB_NUM_SNIFFER_FTS 2 |
240 | #define MLX5_IB_NUM_EGRESS_FTS 1 |
241 | #define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS |
242 | |
243 | struct mlx5_ib_anchor { |
244 | struct mlx5_flow_table *ft; |
245 | struct mlx5_flow_group *fg_goto_table; |
246 | struct mlx5_flow_group *fg_drop; |
247 | struct mlx5_flow_handle *rule_goto_table; |
248 | struct mlx5_flow_handle *rule_drop; |
249 | unsigned int rule_goto_table_ref; |
250 | }; |
251 | |
252 | struct mlx5_ib_flow_prio { |
253 | struct mlx5_flow_table *flow_table; |
254 | struct mlx5_ib_anchor anchor; |
255 | unsigned int refcount; |
256 | }; |
257 | |
258 | struct mlx5_ib_flow_handler { |
259 | struct list_head list; |
260 | struct ib_flow ibflow; |
261 | struct mlx5_ib_flow_prio *prio; |
262 | struct mlx5_flow_handle *rule; |
263 | struct ib_counters *ibcounters; |
264 | struct mlx5_ib_dev *dev; |
265 | struct mlx5_ib_flow_matcher *flow_matcher; |
266 | }; |
267 | |
268 | struct mlx5_ib_flow_matcher { |
269 | struct mlx5_ib_match_params matcher_mask; |
270 | int mask_len; |
271 | enum mlx5_ib_flow_type flow_type; |
272 | enum mlx5_flow_namespace_type ns_type; |
273 | u16 priority; |
274 | struct mlx5_core_dev *mdev; |
275 | atomic_t usecnt; |
276 | u8 match_criteria_enable; |
277 | }; |
278 | |
279 | struct mlx5_ib_steering_anchor { |
280 | struct mlx5_ib_flow_prio *ft_prio; |
281 | struct mlx5_ib_dev *dev; |
282 | atomic_t usecnt; |
283 | }; |
284 | |
285 | struct mlx5_ib_pp { |
286 | u16 index; |
287 | struct mlx5_core_dev *mdev; |
288 | }; |
289 | |
290 | enum mlx5_ib_optional_counter_type { |
291 | MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS, |
292 | MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS, |
293 | MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS, |
294 | |
295 | MLX5_IB_OPCOUNTER_MAX, |
296 | }; |
297 | |
298 | struct mlx5_ib_flow_db { |
299 | struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT]; |
300 | struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT]; |
301 | struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; |
302 | struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; |
303 | struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS]; |
304 | struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; |
305 | struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT]; |
306 | struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX]; |
307 | struct mlx5_flow_table *lag_demux_ft; |
	/* Protect the flow steering bypass flow tables
	 * when adding/deleting flow rules; only a single addition or
	 * removal of a flow steering rule can be in progress at a time.
	 */
313 | struct mutex lock; |
314 | }; |
315 | |
/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */
319 | |
320 | #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 |
321 | /* |
322 | * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI |
323 | * creates the actual hardware QP. |
324 | */ |
325 | #define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2 |
326 | #define MLX5_IB_QPT_DCI IB_QPT_RESERVED3 |
327 | #define MLX5_IB_QPT_DCT IB_QPT_RESERVED4 |
328 | #define MLX5_IB_WR_UMR IB_WR_RESERVED1 |
329 | |
330 | #define MLX5_IB_UPD_XLT_ZAP BIT(0) |
331 | #define MLX5_IB_UPD_XLT_ENABLE BIT(1) |
332 | #define MLX5_IB_UPD_XLT_ATOMIC BIT(2) |
333 | #define MLX5_IB_UPD_XLT_ADDR BIT(3) |
334 | #define MLX5_IB_UPD_XLT_PD BIT(4) |
335 | #define MLX5_IB_UPD_XLT_ACCESS BIT(5) |
336 | #define MLX5_IB_UPD_XLT_INDIRECT BIT(6) |
337 | |
338 | /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. |
339 | * |
340 | * These flags are intended for internal use by the mlx5_ib driver, and they |
341 | * rely on the range reserved for that use in the ib_qp_create_flags enum. |
342 | */ |
343 | #define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START |
344 | #define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1) |
345 | |
346 | struct wr_list { |
347 | u16 opcode; |
348 | u16 next; |
349 | }; |
350 | |
351 | enum mlx5_ib_rq_flags { |
352 | MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0, |
353 | MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1, |
354 | }; |
355 | |
356 | struct mlx5_ib_wq { |
357 | struct mlx5_frag_buf_ctrl fbc; |
358 | u64 *wrid; |
359 | u32 *wr_data; |
360 | struct wr_list *w_list; |
361 | unsigned *wqe_head; |
362 | u16 unsig_count; |
363 | |
364 | /* serialize post to the work queue |
365 | */ |
366 | spinlock_t lock; |
367 | int wqe_cnt; |
368 | int max_post; |
369 | int max_gs; |
370 | int offset; |
371 | int wqe_shift; |
372 | unsigned head; |
373 | unsigned tail; |
374 | u16 cur_post; |
375 | u16 last_poll; |
376 | void *cur_edge; |
377 | }; |
378 | |
379 | enum mlx5_ib_wq_flags { |
380 | MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1, |
381 | MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2, |
382 | }; |
383 | |
384 | #define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9 |
385 | #define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16 |
386 | #define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6 |
387 | #define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13 |
388 | #define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3 |
389 | |
390 | struct mlx5_ib_rwq { |
391 | struct ib_wq ibwq; |
392 | struct mlx5_core_qp core_qp; |
393 | u32 rq_num_pas; |
394 | u32 log_rq_stride; |
395 | u32 log_rq_size; |
396 | u32 rq_page_offset; |
397 | u32 log_page_size; |
398 | u32 log_num_strides; |
399 | u32 two_byte_shift_en; |
400 | u32 single_stride_log_num_of_bytes; |
401 | struct ib_umem *umem; |
402 | size_t buf_size; |
403 | unsigned int page_shift; |
404 | struct mlx5_db db; |
405 | u32 user_index; |
406 | u32 wqe_count; |
407 | u32 wqe_shift; |
408 | int wq_sig; |
409 | u32 create_flags; /* Use enum mlx5_ib_wq_flags */ |
410 | }; |
411 | |
412 | struct mlx5_ib_rwq_ind_table { |
413 | struct ib_rwq_ind_table ib_rwq_ind_tbl; |
414 | u32 rqtn; |
415 | u16 uid; |
416 | }; |
417 | |
418 | struct mlx5_ib_ubuffer { |
419 | struct ib_umem *umem; |
420 | int buf_size; |
421 | u64 buf_addr; |
422 | }; |
423 | |
424 | struct mlx5_ib_qp_base { |
425 | struct mlx5_ib_qp *container_mibqp; |
426 | struct mlx5_core_qp mqp; |
427 | struct mlx5_ib_ubuffer ubuffer; |
428 | }; |
429 | |
430 | struct mlx5_ib_qp_trans { |
431 | struct mlx5_ib_qp_base base; |
432 | u16 xrcdn; |
433 | u32 alt_port; |
434 | u8 atomic_rd_en; |
435 | u8 resp_depth; |
436 | }; |
437 | |
struct mlx5_ib_rss_qp {
	u32	tirn;
};
441 | |
442 | struct mlx5_ib_rq { |
443 | struct mlx5_ib_qp_base base; |
444 | struct mlx5_ib_wq *rq; |
445 | struct mlx5_ib_ubuffer ubuffer; |
446 | struct mlx5_db *doorbell; |
447 | u32 tirn; |
448 | u8 state; |
449 | u32 flags; |
450 | }; |
451 | |
452 | struct mlx5_ib_sq { |
453 | struct mlx5_ib_qp_base base; |
454 | struct mlx5_ib_wq *sq; |
455 | struct mlx5_ib_ubuffer ubuffer; |
456 | struct mlx5_db *doorbell; |
457 | struct mlx5_flow_handle *flow_rule; |
458 | u32 tisn; |
459 | u8 state; |
460 | }; |
461 | |
462 | struct mlx5_ib_raw_packet_qp { |
463 | struct mlx5_ib_sq sq; |
464 | struct mlx5_ib_rq rq; |
465 | }; |
466 | |
467 | struct mlx5_bf { |
468 | int buf_size; |
469 | unsigned long offset; |
470 | struct mlx5_sq_bfreg *bfreg; |
471 | }; |
472 | |
473 | struct mlx5_ib_dct { |
474 | struct mlx5_core_dct mdct; |
475 | u32 *in; |
476 | }; |
477 | |
478 | struct mlx5_ib_gsi_qp { |
479 | struct ib_qp *rx_qp; |
480 | u32 port_num; |
481 | struct ib_qp_cap cap; |
482 | struct ib_cq *cq; |
483 | struct mlx5_ib_gsi_wr *outstanding_wrs; |
484 | u32 outstanding_pi, outstanding_ci; |
485 | int num_qps; |
486 | /* Protects access to the tx_qps. Post send operations synchronize |
487 | * with tx_qp creation in setup_qp(). Also protects the |
488 | * outstanding_wrs array and indices. |
489 | */ |
490 | spinlock_t lock; |
491 | struct ib_qp **tx_qps; |
492 | }; |
493 | |
494 | struct mlx5_ib_qp { |
495 | struct ib_qp ibqp; |
496 | union { |
497 | struct mlx5_ib_qp_trans trans_qp; |
498 | struct mlx5_ib_raw_packet_qp raw_packet_qp; |
		struct mlx5_ib_rss_qp rss_qp;
500 | struct mlx5_ib_dct dct; |
501 | struct mlx5_ib_gsi_qp gsi; |
502 | }; |
503 | struct mlx5_frag_buf buf; |
504 | |
505 | struct mlx5_db db; |
506 | struct mlx5_ib_wq rq; |
507 | |
508 | u8 sq_signal_bits; |
509 | u8 next_fence; |
510 | struct mlx5_ib_wq sq; |
511 | |
512 | /* serialize qp state modifications |
513 | */ |
514 | struct mutex mutex; |
515 | /* cached variant of create_flags from struct ib_qp_init_attr */ |
516 | u32 flags; |
517 | u32 port; |
518 | u8 state; |
519 | int max_inline_data; |
520 | struct mlx5_bf bf; |
521 | u8 has_rq:1; |
	u8			is_rss:1;
523 | |
524 | /* only for user space QPs. For kernel |
525 | * we have it from the bf object |
526 | */ |
527 | int bfregn; |
528 | |
529 | struct list_head qps_list; |
530 | struct list_head cq_recv_list; |
531 | struct list_head cq_send_list; |
532 | struct mlx5_rate_limit rl; |
533 | u32 underlay_qpn; |
534 | u32 flags_en; |
535 | /* |
536 | * IB/core doesn't store low-level QP types, so |
537 | * store both MLX and IBTA types in the field below. |
538 | */ |
539 | enum ib_qp_type type; |
	/* A flag to indicate that a new counter has been configured but has
	 * not yet taken effect
	 */
	u32                     counter_pending;
543 | u32 counter_pending; |
544 | u16 gsi_lag_port; |
545 | }; |
546 | |
547 | struct mlx5_ib_cq_buf { |
548 | struct mlx5_frag_buf_ctrl fbc; |
549 | struct mlx5_frag_buf frag_buf; |
550 | struct ib_umem *umem; |
551 | int cqe_size; |
552 | int nent; |
553 | }; |
554 | |
555 | enum mlx5_ib_cq_pr_flags { |
556 | MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0, |
557 | MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1, |
558 | }; |
559 | |
560 | struct mlx5_ib_cq { |
561 | struct ib_cq ibcq; |
562 | struct mlx5_core_cq mcq; |
563 | struct mlx5_ib_cq_buf buf; |
564 | struct mlx5_db db; |
565 | |
566 | /* serialize access to the CQ |
567 | */ |
568 | spinlock_t lock; |
569 | |
570 | /* protect resize cq |
571 | */ |
572 | struct mutex resize_mutex; |
573 | struct mlx5_ib_cq_buf *resize_buf; |
574 | struct ib_umem *resize_umem; |
575 | int cqe_size; |
576 | struct list_head list_send_qp; |
577 | struct list_head list_recv_qp; |
578 | u32 create_flags; |
579 | struct list_head wc_list; |
580 | enum ib_cq_notify_flags notify_flags; |
581 | struct work_struct notify_work; |
582 | u16 private_flags; /* Use mlx5_ib_cq_pr_flags */ |
583 | }; |
584 | |
585 | struct mlx5_ib_wc { |
586 | struct ib_wc wc; |
587 | struct list_head list; |
588 | }; |
589 | |
590 | struct mlx5_ib_srq { |
591 | struct ib_srq ibsrq; |
592 | struct mlx5_core_srq msrq; |
593 | struct mlx5_frag_buf buf; |
594 | struct mlx5_db db; |
595 | struct mlx5_frag_buf_ctrl fbc; |
596 | u64 *wrid; |
	/* protect SRQ handling
598 | */ |
599 | spinlock_t lock; |
600 | int head; |
601 | int tail; |
602 | u16 wqe_ctr; |
603 | struct ib_umem *umem; |
604 | /* serialize arming a SRQ |
605 | */ |
606 | struct mutex mutex; |
607 | int wq_sig; |
608 | }; |
609 | |
610 | struct mlx5_ib_xrcd { |
611 | struct ib_xrcd ibxrcd; |
612 | u32 xrcdn; |
613 | }; |
614 | |
615 | enum mlx5_ib_mtt_access_flags { |
616 | MLX5_IB_MTT_READ = (1 << 0), |
617 | MLX5_IB_MTT_WRITE = (1 << 1), |
618 | }; |
619 | |
620 | struct mlx5_user_mmap_entry { |
621 | struct rdma_user_mmap_entry rdma_entry; |
622 | u8 mmap_flag; |
623 | u64 address; |
624 | u32 page_idx; |
625 | }; |
626 | |
627 | enum mlx5_mkey_type { |
628 | MLX5_MKEY_MR = 1, |
629 | MLX5_MKEY_MW, |
630 | MLX5_MKEY_INDIRECT_DEVX, |
631 | }; |
632 | |
633 | struct mlx5r_cache_rb_key { |
634 | u8 ats:1; |
635 | unsigned int access_mode; |
636 | unsigned int access_flags; |
637 | unsigned int ndescs; |
638 | }; |
639 | |
640 | struct mlx5_ib_mkey { |
641 | u32 key; |
642 | enum mlx5_mkey_type type; |
643 | unsigned int ndescs; |
644 | struct wait_queue_head wait; |
645 | refcount_t usecount; |
646 | /* User Mkey must hold either a rb_key or a cache_ent. */ |
647 | struct mlx5r_cache_rb_key rb_key; |
648 | struct mlx5_cache_ent *cache_ent; |
649 | }; |
650 | |
651 | #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE) |
652 | |
653 | #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\ |
654 | IB_ACCESS_REMOTE_WRITE |\ |
655 | IB_ACCESS_REMOTE_READ |\ |
656 | IB_ACCESS_REMOTE_ATOMIC |\ |
657 | IB_ZERO_BASED) |
658 | |
659 | #define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\ |
660 | IB_ACCESS_REMOTE_WRITE |\ |
661 | IB_ACCESS_REMOTE_READ |\ |
662 | IB_ZERO_BASED) |
663 | |
664 | #define mlx5_update_odp_stats(mr, counter_name, value) \ |
665 | atomic64_add(value, &((mr)->odp_stats.counter_name)) |
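
/*
 * Example (illustrative): the ODP fault path bumps the per-MR counters with
 * something like
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 *
 * where "faults" names a member of struct ib_odp_counters.
 */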
666 | |
667 | struct mlx5_ib_mr { |
668 | struct ib_mr ibmr; |
669 | struct mlx5_ib_mkey mmkey; |
670 | |
671 | struct ib_umem *umem; |
672 | |
673 | union { |
674 | /* Used only by kernel MRs (umem == NULL) */ |
675 | struct { |
676 | void *descs; |
677 | void *descs_alloc; |
678 | dma_addr_t desc_map; |
679 | int max_descs; |
680 | int desc_size; |
681 | int access_mode; |
682 | |
683 | /* For Kernel IB_MR_TYPE_INTEGRITY */ |
684 | struct mlx5_core_sig_ctx *sig; |
685 | struct mlx5_ib_mr *pi_mr; |
686 | struct mlx5_ib_mr *klm_mr; |
687 | struct mlx5_ib_mr *mtt_mr; |
688 | u64 data_iova; |
689 | u64 pi_iova; |
690 | int meta_ndescs; |
691 | int meta_length; |
692 | int data_length; |
693 | }; |
694 | |
695 | /* Used only by User MRs (umem != NULL) */ |
696 | struct { |
697 | unsigned int page_shift; |
698 | /* Current access_flags */ |
699 | int access_flags; |
700 | |
701 | /* For User ODP */ |
702 | struct mlx5_ib_mr *parent; |
703 | struct xarray implicit_children; |
704 | union { |
705 | struct work_struct work; |
706 | } odp_destroy; |
707 | struct ib_odp_counters odp_stats; |
708 | bool is_odp_implicit; |
709 | }; |
710 | }; |
711 | }; |
712 | |
713 | static inline bool is_odp_mr(struct mlx5_ib_mr *mr) |
714 | { |
715 | return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem && |
716 | mr->umem->is_odp; |
717 | } |
718 | |
719 | static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr) |
720 | { |
721 | return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem && |
722 | mr->umem->is_dmabuf; |
723 | } |
724 | |
725 | struct mlx5_ib_mw { |
726 | struct ib_mw ibmw; |
727 | struct mlx5_ib_mkey mmkey; |
728 | }; |
729 | |
730 | struct mlx5_ib_umr_context { |
731 | struct ib_cqe cqe; |
732 | enum ib_wc_status status; |
733 | struct completion done; |
734 | }; |
735 | |
736 | enum { |
737 | MLX5_UMR_STATE_UNINIT, |
738 | MLX5_UMR_STATE_ACTIVE, |
739 | MLX5_UMR_STATE_RECOVER, |
740 | MLX5_UMR_STATE_ERR, |
741 | }; |
742 | |
743 | struct umr_common { |
744 | struct ib_pd *pd; |
745 | struct ib_cq *cq; |
746 | struct ib_qp *qp; |
747 | /* Protects from UMR QP overflow |
748 | */ |
749 | struct semaphore sem; |
750 | /* Protects from using UMR while the UMR is not active |
751 | */ |
752 | struct mutex lock; |
753 | unsigned int state; |
754 | }; |
755 | |
756 | #define NUM_MKEYS_PER_PAGE \ |
757 | ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32)) |
758 | |
759 | struct mlx5_mkeys_page { |
760 | u32 mkeys[NUM_MKEYS_PER_PAGE]; |
761 | struct list_head list; |
762 | }; |
763 | static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE); |
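/*
 * E.g. with 4 KiB pages on a 64-bit kernel (16-byte struct list_head) this
 * packs (4096 - 16) / 4 = 1020 mkeys per page; the static_assert above
 * checks that the layout fills the page exactly.
 */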
764 | |
765 | struct mlx5_mkeys_queue { |
766 | struct list_head pages_list; |
767 | u32 num_pages; |
768 | unsigned long ci; |
769 | spinlock_t lock; /* sync list ops */ |
770 | }; |
771 | |
772 | struct mlx5_cache_ent { |
773 | struct mlx5_mkeys_queue mkeys_queue; |
774 | u32 pending; |
775 | |
776 | char name[4]; |
777 | |
778 | struct rb_node node; |
779 | struct mlx5r_cache_rb_key rb_key; |
780 | |
781 | u8 is_tmp:1; |
782 | u8 disabled:1; |
783 | u8 fill_to_high_water:1; |
784 | |
785 | /* |
786 | * - limit is the low water mark for stored mkeys, 2* limit is the |
787 | * upper water mark. |
788 | */ |
789 | u32 in_use; |
790 | u32 limit; |
791 | |
792 | /* Statistics */ |
793 | u32 miss; |
794 | |
795 | struct mlx5_ib_dev *dev; |
796 | struct delayed_work dwork; |
797 | }; |
798 | |
799 | struct mlx5r_async_create_mkey { |
800 | union { |
801 | u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)]; |
802 | u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; |
803 | }; |
804 | struct mlx5_async_work cb_work; |
805 | struct mlx5_cache_ent *ent; |
806 | u32 mkey; |
807 | }; |
808 | |
809 | struct mlx5_mkey_cache { |
810 | struct workqueue_struct *wq; |
811 | struct rb_root rb_root; |
812 | struct mutex rb_lock; |
813 | struct dentry *fs_root; |
814 | unsigned long last_add; |
815 | struct delayed_work remove_ent_dwork; |
816 | }; |
817 | |
818 | struct mlx5_ib_port_resources { |
819 | struct mlx5_ib_gsi_qp *gsi; |
820 | struct work_struct pkey_change_work; |
821 | }; |
822 | |
823 | struct mlx5_ib_resources { |
824 | struct ib_cq *c0; |
825 | u32 xrcdn0; |
826 | u32 xrcdn1; |
827 | struct ib_pd *p0; |
828 | struct ib_srq *s0; |
829 | struct ib_srq *s1; |
830 | struct mlx5_ib_port_resources ports[2]; |
831 | }; |
832 | |
833 | #define MAX_OPFC_RULES 2 |
834 | |
835 | struct mlx5_ib_op_fc { |
836 | struct mlx5_fc *fc; |
837 | struct mlx5_flow_handle *rule[MAX_OPFC_RULES]; |
838 | }; |
839 | |
840 | struct mlx5_ib_counters { |
841 | struct rdma_stat_desc *descs; |
842 | size_t *offsets; |
843 | u32 num_q_counters; |
844 | u32 num_cong_counters; |
845 | u32 num_ext_ppcnt_counters; |
846 | u32 num_op_counters; |
847 | u16 set_id; |
848 | struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX]; |
849 | }; |
850 | |
851 | int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num, |
852 | struct mlx5_ib_op_fc *opfc, |
853 | enum mlx5_ib_optional_counter_type type); |
854 | |
855 | void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev, |
856 | struct mlx5_ib_op_fc *opfc, |
857 | enum mlx5_ib_optional_counter_type type); |
858 | |
859 | struct mlx5_ib_multiport_info; |
860 | |
861 | struct mlx5_ib_multiport { |
862 | struct mlx5_ib_multiport_info *mpi; |
863 | /* To be held when accessing the multiport info */ |
864 | spinlock_t mpi_lock; |
865 | }; |
866 | |
867 | struct mlx5_roce { |
868 | /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL |
869 | * netdev pointer |
870 | */ |
871 | rwlock_t netdev_lock; |
872 | struct net_device *netdev; |
873 | struct notifier_block nb; |
874 | struct netdev_net_notifier nn; |
875 | struct notifier_block mdev_nb; |
876 | struct net_device *tracking_netdev; |
877 | atomic_t tx_port_affinity; |
878 | enum ib_port_state last_port_state; |
879 | struct mlx5_ib_dev *dev; |
880 | u32 native_port_num; |
881 | }; |
882 | |
883 | struct mlx5_ib_port { |
884 | struct mlx5_ib_counters cnts; |
885 | struct mlx5_ib_multiport mp; |
886 | struct mlx5_ib_dbg_cc_params *dbg_cc_params; |
887 | struct mlx5_roce roce; |
888 | struct mlx5_eswitch_rep *rep; |
889 | #ifdef CONFIG_MLX5_MACSEC |
890 | struct mlx5_reserved_gids *reserved_gids; |
891 | #endif |
892 | }; |
893 | |
894 | struct mlx5_ib_dbg_param { |
895 | int offset; |
896 | struct mlx5_ib_dev *dev; |
897 | struct dentry *dentry; |
898 | u32 port_num; |
899 | }; |
900 | |
901 | enum mlx5_ib_dbg_cc_types { |
902 | MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE, |
903 | MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI, |
904 | MLX5_IB_DBG_CC_RP_TIME_RESET, |
905 | MLX5_IB_DBG_CC_RP_BYTE_RESET, |
906 | MLX5_IB_DBG_CC_RP_THRESHOLD, |
907 | MLX5_IB_DBG_CC_RP_AI_RATE, |
908 | MLX5_IB_DBG_CC_RP_MAX_RATE, |
909 | MLX5_IB_DBG_CC_RP_HAI_RATE, |
910 | MLX5_IB_DBG_CC_RP_MIN_DEC_FAC, |
911 | MLX5_IB_DBG_CC_RP_MIN_RATE, |
912 | MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP, |
913 | MLX5_IB_DBG_CC_RP_DCE_TCP_G, |
914 | MLX5_IB_DBG_CC_RP_DCE_TCP_RTT, |
915 | MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD, |
916 | MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE, |
917 | MLX5_IB_DBG_CC_RP_GD, |
918 | MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS, |
919 | MLX5_IB_DBG_CC_NP_CNP_DSCP, |
920 | MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE, |
921 | MLX5_IB_DBG_CC_NP_CNP_PRIO, |
922 | MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID, |
923 | MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP, |
924 | MLX5_IB_DBG_CC_MAX, |
925 | }; |
926 | |
927 | struct mlx5_ib_dbg_cc_params { |
928 | struct dentry *root; |
929 | struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX]; |
930 | }; |
931 | |
932 | enum { |
933 | MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100, |
934 | }; |
935 | |
936 | struct mlx5_ib_delay_drop { |
937 | struct mlx5_ib_dev *dev; |
938 | struct work_struct delay_drop_work; |
939 | /* serialize setting of delay drop */ |
940 | struct mutex lock; |
941 | u32 timeout; |
942 | bool activate; |
943 | atomic_t events_cnt; |
944 | atomic_t rqs_cnt; |
945 | struct dentry *dir_debugfs; |
946 | }; |
947 | |
948 | enum mlx5_ib_stages { |
949 | MLX5_IB_STAGE_INIT, |
950 | MLX5_IB_STAGE_FS, |
951 | MLX5_IB_STAGE_CAPS, |
952 | MLX5_IB_STAGE_NON_DEFAULT_CB, |
953 | MLX5_IB_STAGE_ROCE, |
954 | MLX5_IB_STAGE_QP, |
955 | MLX5_IB_STAGE_SRQ, |
956 | MLX5_IB_STAGE_DEVICE_RESOURCES, |
957 | MLX5_IB_STAGE_DEVICE_NOTIFIER, |
958 | MLX5_IB_STAGE_ODP, |
959 | MLX5_IB_STAGE_COUNTERS, |
960 | MLX5_IB_STAGE_CONG_DEBUGFS, |
961 | MLX5_IB_STAGE_UAR, |
962 | MLX5_IB_STAGE_BFREG, |
963 | MLX5_IB_STAGE_PRE_IB_REG_UMR, |
964 | MLX5_IB_STAGE_WHITELIST_UID, |
965 | MLX5_IB_STAGE_IB_REG, |
966 | MLX5_IB_STAGE_POST_IB_REG_UMR, |
967 | MLX5_IB_STAGE_DELAY_DROP, |
968 | MLX5_IB_STAGE_RESTRACK, |
969 | MLX5_IB_STAGE_MAX, |
970 | }; |
971 | |
972 | struct mlx5_ib_stage { |
973 | int (*init)(struct mlx5_ib_dev *dev); |
974 | void (*cleanup)(struct mlx5_ib_dev *dev); |
975 | }; |
976 | |
977 | #define STAGE_CREATE(_stage, _init, _cleanup) \ |
978 | .stage[_stage] = {.init = _init, .cleanup = _cleanup} |
979 | |
980 | struct mlx5_ib_profile { |
981 | struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX]; |
982 | }; |
983 | |
984 | struct mlx5_ib_multiport_info { |
985 | struct list_head list; |
986 | struct mlx5_ib_dev *ibdev; |
987 | struct mlx5_core_dev *mdev; |
988 | struct notifier_block mdev_events; |
989 | struct completion unref_comp; |
990 | u64 sys_image_guid; |
991 | u32 mdev_refcnt; |
992 | bool is_master; |
993 | bool unaffiliate; |
994 | }; |
995 | |
996 | struct mlx5_ib_flow_action { |
997 | struct ib_flow_action ib_action; |
998 | union { |
999 | struct { |
1000 | u64 ib_flags; |
1001 | struct mlx5_accel_esp_xfrm *ctx; |
1002 | } esp_aes_gcm; |
1003 | struct { |
1004 | struct mlx5_ib_dev *dev; |
1005 | u32 sub_type; |
1006 | union { |
1007 | struct mlx5_modify_hdr *modify_hdr; |
1008 | struct mlx5_pkt_reformat *pkt_reformat; |
1009 | }; |
1010 | } flow_action_raw; |
1011 | }; |
1012 | }; |
1013 | |
1014 | struct mlx5_dm { |
1015 | struct mlx5_core_dev *dev; |
1016 | /* This lock is used to protect the access to the shared |
1017 | * allocation map when concurrent requests by different |
1018 | * processes are handled. |
1019 | */ |
1020 | spinlock_t lock; |
1021 | DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES); |
1022 | }; |
1023 | |
1024 | struct mlx5_read_counters_attr { |
1025 | struct mlx5_fc *hw_cntrs_hndl; |
1026 | u64 *out; |
1027 | u32 flags; |
1028 | }; |
1029 | |
1030 | enum mlx5_ib_counters_type { |
1031 | MLX5_IB_COUNTERS_FLOW, |
1032 | }; |
1033 | |
1034 | struct mlx5_ib_mcounters { |
1035 | struct ib_counters ibcntrs; |
1036 | enum mlx5_ib_counters_type type; |
1037 | /* number of counters supported for this counters type */ |
1038 | u32 counters_num; |
1039 | struct mlx5_fc *hw_cntrs_hndl; |
1040 | /* read function for this counters type */ |
1041 | int (*read_counters)(struct ib_device *ibdev, |
1042 | struct mlx5_read_counters_attr *read_attr); |
1043 | /* max index set as part of create_flow */ |
1044 | u32 cntrs_max_index; |
1045 | /* number of counters data entries (<description,index> pair) */ |
1046 | u32 ncounters; |
1047 | /* counters data array for descriptions and indexes */ |
1048 | struct mlx5_ib_flow_counters_desc *counters_data; |
1049 | /* protects access to mcounters internal data */ |
1050 | struct mutex mcntrs_mutex; |
1051 | }; |
1052 | |
1053 | static inline struct mlx5_ib_mcounters * |
1054 | to_mcounters(struct ib_counters *ibcntrs) |
1055 | { |
1056 | return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs); |
1057 | } |
1058 | |
1059 | int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, |
1060 | bool is_egress, |
1061 | struct mlx5_flow_act *action); |
1062 | struct mlx5_ib_lb_state { |
1063 | /* protect the user_td */ |
1064 | struct mutex mutex; |
1065 | u32 user_td; |
1066 | int qps; |
1067 | bool enabled; |
1068 | }; |
1069 | |
1070 | struct mlx5_ib_pf_eq { |
1071 | struct notifier_block irq_nb; |
1072 | struct mlx5_ib_dev *dev; |
1073 | struct mlx5_eq *core; |
1074 | struct work_struct work; |
1075 | spinlock_t lock; /* Pagefaults spinlock */ |
1076 | struct workqueue_struct *wq; |
1077 | mempool_t *pool; |
1078 | }; |
1079 | |
1080 | struct mlx5_devx_event_table { |
1081 | struct mlx5_nb devx_nb; |
1082 | /* serialize updating the event_xa */ |
1083 | struct mutex event_xa_lock; |
1084 | struct xarray event_xa; |
1085 | }; |
1086 | |
1087 | struct mlx5_var_table { |
1088 | /* serialize updating the bitmap */ |
1089 | struct mutex bitmap_lock; |
1090 | unsigned long *bitmap; |
1091 | u64 hw_start_addr; |
1092 | u32 stride_size; |
1093 | u64 num_var_hw_entries; |
1094 | }; |
1095 | |
1096 | struct mlx5_port_caps { |
1097 | bool has_smi; |
1098 | u8 ext_port_cap; |
1099 | }; |
1100 | |
1101 | |
1102 | struct mlx5_special_mkeys { |
1103 | u32 dump_fill_mkey; |
1104 | __be32 null_mkey; |
1105 | __be32 terminate_scatter_list_mkey; |
1106 | }; |
1107 | |
1108 | struct mlx5_macsec { |
1109 | struct mutex lock; /* Protects mlx5_macsec internal contexts */ |
1110 | struct list_head macsec_devices_list; |
1111 | struct notifier_block blocking_events_nb; |
1112 | }; |
1113 | |
1114 | struct mlx5_ib_dev { |
1115 | struct ib_device ib_dev; |
1116 | struct mlx5_core_dev *mdev; |
1117 | struct notifier_block mdev_events; |
1118 | int num_ports; |
1119 | /* serialize update of capability mask |
1120 | */ |
1121 | struct mutex cap_mask_mutex; |
1122 | u8 ib_active:1; |
1123 | u8 is_rep:1; |
1124 | u8 lag_active:1; |
1125 | u8 wc_support:1; |
1126 | u8 fill_delay; |
1127 | struct umr_common umrc; |
1128 | /* sync used page count stats |
1129 | */ |
1130 | struct mlx5_ib_resources devr; |
1131 | |
1132 | atomic_t mkey_var; |
1133 | struct mlx5_mkey_cache cache; |
1134 | struct timer_list delay_timer; |
1135 | /* Prevents soft lock on massive reg MRs */ |
1136 | struct mutex slow_path_mutex; |
1137 | struct ib_odp_caps odp_caps; |
1138 | u64 odp_max_size; |
1139 | struct mutex odp_eq_mutex; |
1140 | struct mlx5_ib_pf_eq odp_pf_eq; |
1141 | |
1142 | struct xarray odp_mkeys; |
1143 | |
1144 | struct mlx5_ib_flow_db *flow_db; |
1145 | /* protect resources needed as part of reset flow */ |
1146 | spinlock_t reset_flow_resource_lock; |
1147 | struct list_head qp_list; |
1148 | /* Array with num_ports elements */ |
1149 | struct mlx5_ib_port *port; |
1150 | struct mlx5_sq_bfreg bfreg; |
1151 | struct mlx5_sq_bfreg wc_bfreg; |
1152 | struct mlx5_sq_bfreg fp_bfreg; |
1153 | struct mlx5_ib_delay_drop delay_drop; |
1154 | const struct mlx5_ib_profile *profile; |
1155 | |
1156 | struct mlx5_ib_lb_state lb; |
1157 | u8 umr_fence; |
1158 | struct list_head ib_dev_list; |
1159 | u64 sys_image_guid; |
1160 | struct mlx5_dm dm; |
1161 | u16 devx_whitelist_uid; |
1162 | struct mlx5_srq_table srq_table; |
1163 | struct mlx5_qp_table qp_table; |
1164 | struct mlx5_async_ctx async_ctx; |
1165 | struct mlx5_devx_event_table devx_event_table; |
1166 | struct mlx5_var_table var_table; |
1167 | |
1168 | struct xarray sig_mrs; |
1169 | struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; |
1170 | u16 pkey_table_len; |
1171 | u8 lag_ports; |
1172 | struct mlx5_special_mkeys mkeys; |
1173 | |
1174 | #ifdef CONFIG_MLX5_MACSEC |
1175 | struct mlx5_macsec macsec; |
1176 | #endif |
1177 | }; |
1178 | |
1179 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) |
1180 | { |
1181 | return container_of(mcq, struct mlx5_ib_cq, mcq); |
1182 | } |
1183 | |
1184 | static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) |
1185 | { |
1186 | return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd); |
1187 | } |
1188 | |
1189 | static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev) |
1190 | { |
1191 | return container_of(ibdev, struct mlx5_ib_dev, ib_dev); |
1192 | } |
1193 | |
1194 | static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr) |
1195 | { |
	return to_mdev(mr->ibmr.device);
1197 | } |
1198 | |
1199 | static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata) |
1200 | { |
1201 | struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( |
1202 | udata, struct mlx5_ib_ucontext, ibucontext); |
1203 | |
	return to_mdev(context->ibucontext.device);
1205 | } |
1206 | |
1207 | static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) |
1208 | { |
1209 | return container_of(ibcq, struct mlx5_ib_cq, ibcq); |
1210 | } |
1211 | |
1212 | static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) |
1213 | { |
1214 | return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp; |
1215 | } |
1216 | |
1217 | static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp) |
1218 | { |
1219 | return container_of(core_qp, struct mlx5_ib_rwq, core_qp); |
1220 | } |
1221 | |
1222 | static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) |
1223 | { |
1224 | return container_of(ibpd, struct mlx5_ib_pd, ibpd); |
1225 | } |
1226 | |
1227 | static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq) |
1228 | { |
1229 | return container_of(ibsrq, struct mlx5_ib_srq, ibsrq); |
1230 | } |
1231 | |
1232 | static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp) |
1233 | { |
1234 | return container_of(ibqp, struct mlx5_ib_qp, ibqp); |
1235 | } |
1236 | |
1237 | static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq) |
1238 | { |
1239 | return container_of(ibwq, struct mlx5_ib_rwq, ibwq); |
1240 | } |
1241 | |
1242 | static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) |
1243 | { |
1244 | return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl); |
1245 | } |
1246 | |
1247 | static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) |
1248 | { |
1249 | return container_of(msrq, struct mlx5_ib_srq, msrq); |
1250 | } |
1251 | |
1252 | static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr) |
1253 | { |
1254 | return container_of(ibmr, struct mlx5_ib_mr, ibmr); |
1255 | } |
1256 | |
1257 | static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw) |
1258 | { |
1259 | return container_of(ibmw, struct mlx5_ib_mw, ibmw); |
1260 | } |
1261 | |
1262 | static inline struct mlx5_ib_flow_action * |
1263 | to_mflow_act(struct ib_flow_action *ibact) |
1264 | { |
1265 | return container_of(ibact, struct mlx5_ib_flow_action, ib_action); |
1266 | } |
1267 | |
1268 | static inline struct mlx5_user_mmap_entry * |
1269 | to_mmmap(struct rdma_user_mmap_entry *rdma_entry) |
1270 | { |
1271 | return container_of(rdma_entry, |
1272 | struct mlx5_user_mmap_entry, rdma_entry); |
1273 | } |
1274 | |
1275 | int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, |
1276 | struct mlx5_db *db); |
1277 | void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); |
1278 | void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); |
1279 | void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); |
1280 | void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); |
1281 | int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, |
1282 | struct ib_udata *udata); |
1283 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
1284 | static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags) |
1285 | { |
1286 | return 0; |
1287 | } |
1288 | int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr, |
1289 | struct ib_udata *udata); |
1290 | int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
1291 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
1292 | int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); |
1293 | int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); |
1294 | int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, |
1295 | const struct ib_recv_wr **bad_wr); |
1296 | int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); |
1297 | void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); |
1298 | int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, |
1299 | struct ib_udata *udata); |
1300 | int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
1301 | int attr_mask, struct ib_udata *udata); |
1302 | int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, |
1303 | struct ib_qp_init_attr *qp_init_attr); |
1304 | int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); |
1305 | void mlx5_ib_drain_sq(struct ib_qp *qp); |
1306 | void mlx5_ib_drain_rq(struct ib_qp *qp); |
1307 | int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, |
1308 | size_t buflen, size_t *bc); |
1309 | int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, |
1310 | size_t buflen, size_t *bc); |
1311 | int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, |
1312 | size_t buflen, size_t *bc); |
1313 | int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, |
1314 | struct ib_udata *udata); |
1315 | int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); |
1316 | int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
1317 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
1318 | int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
1319 | int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); |
1320 | struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); |
1321 | struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
1322 | u64 virt_addr, int access_flags, |
1323 | struct ib_udata *udata); |
1324 | struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, |
1325 | u64 length, u64 virt_addr, |
1326 | int fd, int access_flags, |
1327 | struct ib_udata *udata); |
1328 | int mlx5_ib_advise_mr(struct ib_pd *pd, |
1329 | enum ib_uverbs_advise_mr_advice advice, |
1330 | u32 flags, |
1331 | struct ib_sge *sg_list, |
1332 | u32 num_sge, |
1333 | struct uverbs_attr_bundle *attrs); |
1334 | int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); |
1335 | int mlx5_ib_dealloc_mw(struct ib_mw *mw); |
1336 | struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, |
1337 | int access_flags); |
1338 | void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr); |
1339 | void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr); |
1340 | struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, |
1341 | u64 length, u64 virt_addr, int access_flags, |
1342 | struct ib_pd *pd, struct ib_udata *udata); |
1343 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); |
1344 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
1345 | u32 max_num_sg); |
1346 | struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, |
1347 | u32 max_num_sg, |
1348 | u32 max_num_meta_sg); |
1349 | int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
1350 | unsigned int *sg_offset); |
1351 | int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, |
1352 | int data_sg_nents, unsigned int *data_sg_offset, |
1353 | struct scatterlist *meta_sg, int meta_sg_nents, |
1354 | unsigned int *meta_sg_offset); |
1355 | int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, |
1356 | const struct ib_wc *in_wc, const struct ib_grh *in_grh, |
1357 | const struct ib_mad *in, struct ib_mad *out, |
1358 | size_t *out_mad_size, u16 *out_mad_pkey_index); |
1359 | int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); |
1360 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); |
1361 | int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port); |
1362 | int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, |
1363 | __be64 *sys_image_guid); |
1364 | int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, |
1365 | u16 *max_pkeys); |
1366 | int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, |
1367 | u32 *vendor_id); |
1368 | int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc); |
1369 | int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid); |
1370 | int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index, |
1371 | u16 *pkey); |
1372 | int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, |
1373 | union ib_gid *gid); |
1374 | int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, |
1375 | struct ib_port_attr *props); |
1376 | int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, |
1377 | struct ib_port_attr *props); |
1378 | void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, |
1379 | u64 access_flags); |
1380 | int mlx5_ib_get_cqe_size(struct ib_cq *ibcq); |
1381 | int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev); |
1382 | void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev); |
1383 | struct mlx5_cache_ent * |
1384 | mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, |
1385 | struct mlx5r_cache_rb_key rb_key, |
1386 | bool persistent_entry); |
1387 | |
1388 | struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, |
1389 | int access_flags, int access_mode, |
1390 | int ndescs); |
1391 | |
1392 | int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, |
1393 | struct ib_mr_status *mr_status); |
1394 | struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, |
1395 | struct ib_wq_init_attr *init_attr, |
1396 | struct ib_udata *udata); |
1397 | int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); |
1398 | int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, |
1399 | u32 wq_attr_mask, struct ib_udata *udata); |
1400 | int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table, |
1401 | struct ib_rwq_ind_table_init_attr *init_attr, |
1402 | struct ib_udata *udata); |
1403 | int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); |
1404 | struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, |
1405 | struct ib_dm_mr_attr *attr, |
1406 | struct uverbs_attr_bundle *attrs); |
1407 | |
1408 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
1409 | int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev); |
1410 | int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq); |
1411 | void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev); |
1412 | int __init mlx5_ib_odp_init(void); |
1413 | void mlx5_ib_odp_cleanup(void); |
1414 | int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev); |
1415 | void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, |
1416 | struct mlx5_ib_mr *mr, int flags); |
1417 | |
1418 | int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, |
1419 | enum ib_uverbs_advise_mr_advice advice, |
1420 | u32 flags, struct ib_sge *sg_list, u32 num_sge); |
1421 | int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr); |
1422 | int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr); |
1423 | #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ |
1424 | static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; } |
1425 | static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, |
1426 | struct mlx5_ib_pf_eq *eq) |
1427 | { |
1428 | return 0; |
1429 | } |
1430 | static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {} |
1431 | static inline int mlx5_ib_odp_init(void) { return 0; } |
1432 | static inline void mlx5_ib_odp_cleanup(void) {} |
1433 | static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev) |
1434 | { |
1435 | return 0; |
1436 | } |
1437 | static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, |
1438 | struct mlx5_ib_mr *mr, int flags) {} |
1439 | |
1440 | static inline int |
1441 | mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, |
1442 | enum ib_uverbs_advise_mr_advice advice, u32 flags, |
1443 | struct ib_sge *sg_list, u32 num_sge) |
1444 | { |
1445 | return -EOPNOTSUPP; |
1446 | } |
1447 | static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr) |
1448 | { |
1449 | return -EOPNOTSUPP; |
1450 | } |
1451 | static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr) |
1452 | { |
1453 | return -EOPNOTSUPP; |
1454 | } |
1455 | #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ |
1456 | |
1457 | extern const struct mmu_interval_notifier_ops mlx5_mn_ops; |
1458 | |
1459 | /* Needed for rep profile */ |
1460 | void __mlx5_ib_remove(struct mlx5_ib_dev *dev, |
1461 | const struct mlx5_ib_profile *profile, |
1462 | int stage); |
1463 | int __mlx5_ib_add(struct mlx5_ib_dev *dev, |
1464 | const struct mlx5_ib_profile *profile); |
1465 | |
1466 | int mlx5_ib_get_vf_config(struct ib_device *device, int vf, |
1467 | u32 port, struct ifla_vf_info *info); |
1468 | int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, |
1469 | u32 port, int state); |
1470 | int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, |
1471 | u32 port, struct ifla_vf_stats *stats); |
1472 | int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port, |
1473 | struct ifla_vf_guid *node_guid, |
1474 | struct ifla_vf_guid *port_guid); |
1475 | int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port, |
1476 | u64 guid, int type); |
1477 | |
1478 | __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev, |
1479 | const struct ib_gid_attr *attr); |
1480 | |
1481 | void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num); |
1482 | void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num); |
1483 | |
1484 | /* GSI QP helper functions */ |
1485 | int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp, |
1486 | struct ib_qp_init_attr *attr); |
1487 | int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp); |
1488 | int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, |
1489 | int attr_mask); |
1490 | int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, |
1491 | int qp_attr_mask, |
1492 | struct ib_qp_init_attr *qp_init_attr); |
1493 | int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr, |
1494 | const struct ib_send_wr **bad_wr); |
1495 | int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr, |
1496 | const struct ib_recv_wr **bad_wr); |
1497 | void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi); |
1498 | |
1499 | int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc); |
1500 | |
1501 | void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, |
1502 | int bfregn); |
1503 | struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi); |
1504 | struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev, |
1505 | u32 ib_port_num, |
1506 | u32 *native_port_num); |
1507 | void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev, |
1508 | u32 port_num); |
1509 | |
1510 | extern const struct uapi_definition mlx5_ib_devx_defs[]; |
1511 | extern const struct uapi_definition mlx5_ib_flow_defs[]; |
1512 | extern const struct uapi_definition mlx5_ib_qos_defs[]; |
1513 | extern const struct uapi_definition mlx5_ib_std_types_defs[]; |
1514 | |
1515 | static inline int is_qp1(enum ib_qp_type qp_type) |
1516 | { |
1517 | return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI; |
1518 | } |
1519 | |
1520 | static inline u32 check_cq_create_flags(u32 flags) |
1521 | { |
1522 | /* |
1523 | * It returns non-zero value for unsupported CQ |
1524 | * create flags, otherwise it returns zero. |
1525 | */ |
1526 | return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN | |
1527 | IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)); |
1528 | } |
1529 | |
1530 | static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx, |
1531 | u32 *user_index) |
1532 | { |
1533 | if (cqe_version) { |
1534 | if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) || |
1535 | (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK)) |
1536 | return -EINVAL; |
1537 | *user_index = cmd_uidx; |
1538 | } else { |
1539 | *user_index = MLX5_IB_DEFAULT_UIDX; |
1540 | } |
1541 | |
1542 | return 0; |
1543 | } |
1544 | |
1545 | static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext, |
1546 | struct mlx5_ib_create_qp *ucmd, |
1547 | int inlen, |
1548 | u32 *user_index) |
1549 | { |
1550 | u8 cqe_version = ucontext->cqe_version; |
1551 | |
1552 | if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && |
1553 | (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) |
1554 | return 0; |
1555 | |
1556 | if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) |
1557 | return -EINVAL; |
1558 | |
	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
1560 | } |
1561 | |
1562 | static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext, |
1563 | struct mlx5_ib_create_srq *ucmd, |
1564 | int inlen, |
1565 | u32 *user_index) |
1566 | { |
1567 | u8 cqe_version = ucontext->cqe_version; |
1568 | |
1569 | if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version && |
1570 | (ucmd->uidx == MLX5_IB_DEFAULT_UIDX)) |
1571 | return 0; |
1572 | |
1573 | if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version) |
1574 | return -EINVAL; |
1575 | |
	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
1577 | } |
1578 | |
1579 | static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support) |
1580 | { |
1581 | return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ? |
1582 | MLX5_UARS_IN_PAGE : 1; |
1583 | } |
1584 | |
1585 | extern void *xlt_emergency_page; |
1586 | |
1587 | int bfregn_to_uar_index(struct mlx5_ib_dev *dev, |
1588 | struct mlx5_bfreg_info *bfregi, u32 bfregn, |
1589 | bool dyn_bfreg); |
1590 | |
1591 | static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, |
1592 | struct mlx5_ib_mkey *mmkey) |
1593 | { |
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
1598 | } |
1599 | |
1600 | /* deref an mkey that can participate in ODP flow */ |
1601 | static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey) |
1602 | { |
	if (refcount_dec_and_test(&mmkey->usecount))
1604 | wake_up(&mmkey->wait); |
1605 | } |
1606 | |
/* deref an mkey that can participate in ODP flow and wait for release */
1608 | static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey) |
1609 | { |
1610 | mlx5r_deref_odp_mkey(mmkey); |
1611 | wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0); |
1612 | } |
1613 | |
1614 | int mlx5_ib_test_wc(struct mlx5_ib_dev *dev); |
1615 | |
1616 | static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) |
1617 | { |
1618 | /* |
1619 | * If the driver is in hash mode and the port_select_flow_table_bypass cap |
1620 | * is supported, it means that the driver no longer needs to assign the port |
1621 | * affinity by default. If a user wants to set the port affinity explicitly, |
1622 | * the user has a dedicated API to do that, so there is no need to assign |
1623 | * the port affinity by default. |
1624 | */ |
1625 | if (dev->lag_active && |
	    mlx5_lag_mode_is_hash(dev->mdev) &&
1627 | MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass)) |
1628 | return 0; |
1629 | |
	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
1631 | return 0; |
1632 | |
1633 | return dev->lag_active || |
1634 | (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 && |
1635 | MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity)); |
1636 | } |
1637 | |
1638 | static inline bool rt_supported(int ts_cap) |
1639 | { |
1640 | return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME || |
1641 | ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; |
1642 | } |
1643 | |
1644 | /* |
1645 | * PCI Peer to Peer is a trainwreck. If no switch is present then things |
1646 | * sometimes work, depending on the pci_distance_p2p logic for excluding broken |
1647 | * root complexes. However if a switch is present in the path, then things get |
 * really ugly depending on how the switch is set up. This table assumes that the
 * root complex is strict and is validating that all req/reps are matched
1650 | * perfectly - so any scenario where it sees only half the transaction is a |
1651 | * failure. |
1652 | * |
1653 | * CR/RR/DT ATS RO P2P |
1654 | * 00X X X OK |
1655 | * 010 X X fails (request is routed to root but root never sees comp) |
1656 | * 011 0 X fails (request is routed to root but root never sees comp) |
1657 | * 011 1 X OK |
1658 | * 10X X 1 OK |
1659 | * 101 X 0 fails (completion is routed to root but root didn't see req) |
1660 | * 110 X 0 SLOW |
1661 | * 111 0 0 SLOW |
1662 | * 111 1 0 fails (completion is routed to root but root didn't see req) |
1663 | * 111 1 1 OK |
1664 | * |
1665 | * Unfortunately we cannot reliably know if a switch is present or what the |
1666 | * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that |
1667 | * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows. |
1668 | * |
1669 | * For now assume if the umem is a dma_buf then it is P2P. |
1670 | */ |
1671 | static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev, |
1672 | struct ib_umem *umem, int access_flags) |
1673 | { |
1674 | if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf) |
1675 | return false; |
1676 | return access_flags & IB_ACCESS_RELAXED_ORDERING; |
1677 | } |
1678 | |
1679 | int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, |
1680 | unsigned int index, const union ib_gid *gid, |
1681 | const struct ib_gid_attr *attr); |
1682 | #endif /* MLX5_IB_H */ |
1683 | |