1 | /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */ |
2 | /* |
3 | * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved. |
4 | * Copyright (c) 2005 Topspin Communications. All rights reserved. |
5 | * Copyright (c) 2005 Cisco Systems. All rights reserved. |
6 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. |
7 | */ |
8 | |
9 | #ifndef IRDMA_ABI_H |
10 | #define IRDMA_ABI_H |
11 | |
12 | #include <linux/types.h> |
13 | |
14 | /* irdma must support legacy GEN_1 i40iw kernel |
15 | * and user-space whose last ABI ver is 5 |
16 | */ |
17 | #define IRDMA_ABI_VER 5 |
18 | |
/* Memory-registration type, carried in irdma_mem_reg_req.reg_type.
 * Explicit values are part of the user/kernel ABI and must not change.
 */
enum irdma_memreg_type {
	IRDMA_MEMREG_TYPE_MEM = 0,	/* ordinary user memory region */
	IRDMA_MEMREG_TYPE_QP = 1,	/* buffer backing a QP (SQ/RQ) */
	IRDMA_MEMREG_TYPE_CQ = 2,	/* buffer backing a CQ */
};
24 | |
/* Bit flags for the comp_mask fields in
 * struct irdma_alloc_ucontext_req/resp; each bit advertises that the
 * corresponding optional field/capability is present and valid.
 */
enum {
	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,	/* min_hw_wq_size field valid */
};
29 | |
/* User -> kernel request for ucontext allocation.
 * Field order, sizes, and explicit reserved padding are ABI;
 * __aligned_u64 keeps comp_mask 8-byte aligned on all architectures.
 */
struct irdma_alloc_ucontext_req {
	__u32 rsvd32;			/* reserved, keeps layout/alignment */
	__u8 userspace_ver;		/* ABI version the user library speaks */
	__u8 rsvd8[3];			/* reserved padding to 8-byte boundary */
	__aligned_u64 comp_mask;	/* IRDMA_ALLOC_UCTX_* capability bits */
};
36 | |
/* Kernel -> user response to ucontext allocation: device limits and
 * capabilities the user library needs to size its queues.
 * New fields are appended at the end and announced via comp_mask bits,
 * so older userspace (ABI ver 5, see IRDMA_ABI_VER) keeps working.
 */
struct irdma_alloc_ucontext_resp {
	__u32 max_pds;			/* maximum protection domains */
	__u32 max_qps;			/* maximum queue pairs */
	__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
	__u8 kernel_ver;		/* ABI version the kernel speaks */
	__u8 rsvd[3];			/* reserved padding to 8-byte boundary */
	__aligned_u64 feature_flags;	/* device feature capability bits */
	__aligned_u64 db_mmap_key;	/* mmap offset/key for the doorbell page */
	__u32 max_hw_wq_frags;		/* max SGEs per WQE */
	__u32 max_hw_read_sges;		/* max SGEs for RDMA read */
	__u32 max_hw_inline;		/* max inline data size in bytes */
	__u32 max_hw_rq_quanta;		/* max RQ size in HW quanta */
	__u32 max_hw_wq_quanta;		/* max SQ size in HW quanta */
	__u32 min_hw_cq_size;		/* minimum CQ entries */
	__u32 max_hw_cq_size;		/* maximum CQ entries */
	__u16 max_hw_sq_chunk;		/* max contiguous SQ chunk */
	__u8 hw_rev;			/* hardware generation/revision */
	__u8 rsvd2;			/* reserved padding */
	__aligned_u64 comp_mask;	/* IRDMA_ALLOC_UCTX_* bits: which fields below are valid */
	__u16 min_hw_wq_size;		/* valid when IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE set */
	__u8 rsvd3[6];			/* reserved padding to 8-byte boundary */
};
59 | |
/* Kernel -> user response to PD allocation. */
struct irdma_alloc_pd_resp {
	__u32 pd_id;		/* hardware protection-domain id */
	__u8 rsvd[4];		/* reserved padding to 8-byte boundary */
};
64 | |
/* User -> kernel request to resize a CQ. */
struct irdma_resize_cq_req {
	__aligned_u64 user_cq_buffer;	/* userspace VA of the new CQ buffer */
};
68 | |
/* User -> kernel request to create a CQ. */
struct irdma_create_cq_req {
	__aligned_u64 user_cq_buf;	/* userspace VA of the CQ buffer */
	__aligned_u64 user_shadow_area;	/* userspace VA of the CQ shadow area */
};
73 | |
/* User -> kernel request to create a QP. */
struct irdma_create_qp_req {
	__aligned_u64 user_wqe_bufs;	/* userspace VA of the WQE buffers */
	__aligned_u64 user_compl_ctx;	/* user completion context, returned in CQEs */
};
78 | |
/* User -> kernel memory-registration request: page counts for the
 * queue buffers being registered (which counts apply depends on
 * reg_type).
 */
struct irdma_mem_reg_req {
	__u16 reg_type; /* enum irdma_memreg_type */
	__u16 cq_pages;		/* pages backing the CQ buffer */
	__u16 rq_pages;		/* pages backing the RQ buffer */
	__u16 sq_pages;		/* pages backing the SQ buffer */
};
85 | |
/* User -> kernel request accompanying modify-QP: flush directives. */
struct irdma_modify_qp_req {
	__u8 sq_flush;		/* nonzero: request SQ flush */
	__u8 rq_flush;		/* nonzero: request RQ flush */
	__u8 rsvd[6];		/* reserved padding to 8-byte boundary */
};
91 | |
/* Kernel -> user response to CQ creation. */
struct irdma_create_cq_resp {
	__u32 cq_id;		/* hardware CQ id */
	__u32 cq_size;		/* actual CQ size granted (entries) */
};
96 | |
/* Kernel -> user response to QP creation: the actual (possibly
 * rounded-up) queue sizes and capabilities granted by the kernel.
 */
struct irdma_create_qp_resp {
	__u32 qp_id;		/* hardware QP id */
	__u32 actual_sq_size;	/* SQ size granted, may exceed requested */
	__u32 actual_rq_size;	/* RQ size granted, may exceed requested */
	__u32 irdma_drv_opt;	/* driver option flags */
	__u16 push_idx;		/* push page index for this QP */
	__u8 lsmm;		/* nonzero: last streaming-mode message (iWARP) in use — TODO confirm */
	__u8 rsvd;		/* reserved padding */
	__u32 qp_caps;		/* QP capability flags */
};
107 | |
/* Kernel -> user response to modify-QP: mmap keys for push-mode
 * (low-latency doorbell/WQE write) pages, if granted.
 */
struct irdma_modify_qp_resp {
	__aligned_u64 push_wqe_mmap_key;	/* mmap offset/key for the push WQE page */
	__aligned_u64 push_db_mmap_key;		/* mmap offset/key for the push doorbell page */
	__u16 push_offset;			/* offset within the push page */
	__u8 push_valid;			/* nonzero: push mode available, keys valid */
	__u8 rsvd[5];				/* reserved padding to 8-byte boundary */
};
115 | |
/* Kernel -> user response to address-handle creation. */
struct irdma_create_ah_resp {
	__u32 ah_id;		/* hardware address-handle id */
	__u8 rsvd[4];		/* reserved padding to 8-byte boundary */
};
120 | #endif /* IRDMA_ABI_H */ |
121 | |