/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include "erdma.h"

/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512
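
/*
 * Illustrative arithmetic (assuming the 16-byte struct erdma_sge layout
 * from erdma_hw.h): ERDMA_MAX_INLINE works out to
 * sizeof(struct erdma_sge) * ERDMA_MAX_SEND_SGE = 16 * 6 = 96 bytes,
 * i.e. an inline send presumably carries at most as much payload as the
 * SGE slots it replaces in the SQE.
 */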

enum {
        ERDMA_MMAP_IO_NC = 0, /* no cache */
};

struct erdma_user_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 address;
        u8 mmap_flag;
};

struct erdma_ext_db_info {
        bool enable;
        u16 sdb_off;
        u16 rdb_off;
        u16 cdb_off;
};

struct erdma_ucontext {
        struct ib_ucontext ibucontext;

        struct erdma_ext_db_info ext_db;

        u64 sdb;
        u64 rdb;
        u64 cdb;

        struct rdma_user_mmap_entry *sq_db_mmap_entry;
        struct rdma_user_mmap_entry *rq_db_mmap_entry;
        struct rdma_user_mmap_entry *cq_db_mmap_entry;

        /* doorbell records */
        struct list_head dbrecords_page_list;
        struct mutex dbrecords_page_mutex;
};

struct erdma_pd {
        struct ib_pd ibpd;
        u32 pdn;
};
/*
 * Memory region (MR) definitions.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each MTT entry takes 8 bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8
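
/*
 * Example (illustrative): a region whose page list needs four entries
 * fits the inline case, since MTT_SIZE(4) == 4 << 3 == 32 bytes; as the
 * naming suggests, a page list longer than ERDMA_MAX_INLINE_MTT_ENTRIES
 * presumably lives in a separately allocated MTT buffer instead.
 */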

#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

#define ERDMA_MR_MTT_0LEVEL 0
#define ERDMA_MR_MTT_1LEVEL 1

#define ERDMA_MR_ACC_RA BIT(0)
#define ERDMA_MR_ACC_LR BIT(1)
#define ERDMA_MR_ACC_LW BIT(2)
#define ERDMA_MR_ACC_RR BIT(3)
#define ERDMA_MR_ACC_RW BIT(4)

static inline u8 to_erdma_access_flags(int access)
{
        return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
               (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
               (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0) |
               (access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
}
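
/*
 * Usage sketch (illustrative, not part of the driver): an MR registered
 * for local and remote write maps as
 *
 *   u8 hw_acc = to_erdma_access_flags(IB_ACCESS_LOCAL_WRITE |
 *                                     IB_ACCESS_REMOTE_WRITE);
 *   // hw_acc == (ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RW)
 */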

/* Hierarchical storage structure for MTT entries */
struct erdma_mtt {
        u64 *buf;
        size_t size;

        bool continuous;
        union {
                dma_addr_t buf_dma;
                struct {
                        struct scatterlist *sglist;
                        u32 nsg;
                        u32 level;
                };
        };

        struct erdma_mtt *low_level;
};
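
/*
 * Layout sketch (as the fields above suggest): a continuous MTT is
 * DMA-mapped as one block and addressed through buf_dma
 * (ERDMA_MR_MTT_0LEVEL); a discontinuous one is described by sglist/nsg
 * and reached indirectly via the low_level chain (ERDMA_MR_MTT_1LEVEL).
 */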

struct erdma_mem {
        struct ib_umem *umem;
        struct erdma_mtt *mtt;

        u32 page_size;
        u32 page_offset;
        u32 page_cnt;
        u32 mtt_nents;

        u64 va;
        u64 len;
};

struct erdma_mr {
        struct ib_mr ibmr;
        struct erdma_mem mem;
        u8 type;
        u8 access;
        u8 valid;
};

struct erdma_user_dbrecords_page {
        struct list_head list;
        struct ib_umem *umem;
        u64 va;
        int refcnt;
};
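
/*
 * Sharing sketch (illustrative): one pinned user page can hold the
 * doorbell records of many QPs/CQs; refcnt counts the users pointing
 * into it, and the page is presumably unpinned once that count drops
 * to zero.
 */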

struct erdma_uqp {
        struct erdma_mem sq_mem;
        struct erdma_mem rq_mem;

        dma_addr_t sq_db_info_dma_addr;
        dma_addr_t rq_db_info_dma_addr;

        struct erdma_user_dbrecords_page *user_dbr_page;

        u32 rq_offset;
};

struct erdma_kqp {
        u16 sq_pi;
        u16 sq_ci;

        u16 rq_pi;
        u16 rq_ci;

        u64 *swr_tbl;
        u64 *rwr_tbl;

        void __iomem *hw_sq_db;
        void __iomem *hw_rq_db;

        void *sq_buf;
        dma_addr_t sq_buf_dma_addr;

        void *rq_buf;
        dma_addr_t rq_buf_dma_addr;

        void *sq_db_info;
        void *rq_db_info;

        u8 sig_all;
};

enum erdma_qp_state {
        ERDMA_QP_STATE_IDLE = 0,
        ERDMA_QP_STATE_RTR = 1,
        ERDMA_QP_STATE_RTS = 2,
        ERDMA_QP_STATE_CLOSING = 3,
        ERDMA_QP_STATE_TERMINATE = 4,
        ERDMA_QP_STATE_ERROR = 5,
        ERDMA_QP_STATE_UNDEF = 7,
        ERDMA_QP_STATE_COUNT = 8
};

enum erdma_qp_attr_mask {
        ERDMA_QP_ATTR_STATE = (1 << 0),
        ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
        ERDMA_QP_ATTR_ORD = (1 << 3),
        ERDMA_QP_ATTR_IRD = (1 << 4),
        ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
        ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
        ERDMA_QP_ATTR_MPA = (1 << 7)
};

enum erdma_qp_flags {
        ERDMA_QP_IN_FLUSHING = (1 << 0),
};

struct erdma_qp_attrs {
        enum erdma_qp_state state;
        enum erdma_cc_alg cc; /* Congestion control algorithm */
        u32 sq_size;
        u32 rq_size;
        u32 orq_size;
        u32 irq_size;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
        u8 qp_type;
        u8 pd_len;
};

struct erdma_qp {
        struct ib_qp ibqp;
        struct kref ref;
        struct completion safe_free;
        struct erdma_dev *dev;
        struct erdma_cep *cep;
        struct rw_semaphore state_lock;

        unsigned long flags;
        struct delayed_work reflush_dwork;

        union {
                struct erdma_kqp kern_qp;
                struct erdma_uqp user_qp;
        };

        struct erdma_cq *scq;
        struct erdma_cq *rcq;

        struct erdma_qp_attrs attrs;
        spinlock_t lock;
};
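
/*
 * Discriminating the union above (illustrative helper, assuming the
 * usual restrack convention for kernel-owned QPs; not part of this
 * header):
 *
 *   static inline bool erdma_qp_is_kernel(struct erdma_qp *qp)
 *   {
 *           return rdma_is_kernel_res(&qp->ibqp.res);
 *   }
 *
 * selects between kern_qp and user_qp.
 */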

struct erdma_kcq_info {
        void *qbuf;
        dma_addr_t qbuf_dma_addr;
        u32 ci;
        u32 cmdsn;
        u32 notify_cnt;

        spinlock_t lock;
        u8 __iomem *db;
        u64 *db_record;
};

struct erdma_ucq_info {
        struct erdma_mem qbuf_mem;
        struct erdma_user_dbrecords_page *user_dbr_page;
        dma_addr_t db_info_dma_addr;
};

struct erdma_cq {
        struct ib_cq ibcq;
        u32 cqn;

        u32 depth;
        u32 assoc_eqn;

        union {
                struct erdma_kcq_info kern_cq;
                struct erdma_ucq_info user_cq;
        };
};

#define QP_ID(qp) ((qp)->ibqp.qp_num)

static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
        return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
        return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}
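
/*
 * Lookup sketch (illustrative): xa_load() returns NULL for an unused ID,
 * so callers are expected to check the result before taking a reference:
 *
 *   struct erdma_qp *qp = find_qp_by_qpn(dev, qpn);
 *
 *   if (qp)
 *           erdma_qp_get(qp);
 */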

void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
                             enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);

static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
        return container_of(ibctx, struct erdma_ucontext, ibucontext);
}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
        return container_of(pd, struct erdma_pd, ibpd);
}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct erdma_mr, ibmr);
}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
        return container_of(qp, struct erdma_qp, ibqp);
}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct erdma_cq, ibcq);
}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
        return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}

int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
                       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
                             struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                    struct ib_udata *data);
int erdma_query_port(struct ib_device *dev, u32 port,
                     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
                    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
                   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
                    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
                    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
                    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
                                                u32 port_num);
int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
                       u32 port, int index);

#endif