1 | /* QLogic qedr NIC Driver |
2 | * Copyright (c) 2015-2016 QLogic Corporation |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and /or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | #ifndef __QEDR_H__ |
33 | #define __QEDR_H__ |
34 | |
35 | #include <linux/pci.h> |
36 | #include <linux/xarray.h> |
37 | #include <rdma/ib_addr.h> |
38 | #include <linux/qed/qed_if.h> |
39 | #include <linux/qed/qed_chain.h> |
40 | #include <linux/qed/qed_rdma_if.h> |
41 | #include <linux/qed/qede_rdma.h> |
42 | #include <linux/qed/roce_common.h> |
43 | #include <linux/completion.h> |
44 | #include "qedr_hsi_rdma.h" |
45 | |
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
/* Transport checks: a qedr device is bound to either iWARP or RoCE. */
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)

/* Debug print prefixed with the IB device name and a module tag. */
#define DP_DEBUG(dev, module, fmt, ...) \
	pr_debug("(%s) " module ": " fmt, \
		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)

/* Module tags for DP_DEBUG(); padded so log prefixes line up. */
#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ "  CQ"
#define QEDR_MSG_MR "  MR"
#define QEDR_MSG_RQ "  RQ"
#define QEDR_MSG_SQ "  SQ"
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_SRQ " SRQ"
#define QEDR_MSG_GSI " GSI"
#define QEDR_MSG_IWARP " IW"

/* Sanity signature stored in qedr_cq.sig; checked against CQ corruption. */
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)

#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
/* NOTE(review): assumes FW_PAGE_SIZE is 4K (1 << 12) - confirm against
 * RDMA_RING_PAGE_SIZE if either definition changes.
 */
#define FW_PAGE_SHIFT (12)
70 | |
71 | struct qedr_dev; |
72 | |
/* Per-CNQ (Completion Notification Queue) context. */
struct qedr_cnq {
	struct qedr_dev *dev;		/* owning device */
	struct qed_chain pbl;		/* CNQ element chain */
	struct qed_sb_info *sb;		/* associated status block */
	char name[32];
	u64 n_comp;			/* completion counter - presumably events handled; confirm at users */
	__le16 *hw_cons_ptr;		/* little-endian HW consumer index */
	u8 index;			/* CNQ index within the device */
};
82 | |
#define QEDR_MAX_SGID 128

/* Device capabilities and limits, populated from the qed core at init. */
struct qedr_device_attr {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;
	u64 node_guid;			/* in network order */
	u64 sys_image_guid;		/* in network order */
	u8 max_cnq;
	u8 max_sge;			/* max scatter/gather entries per WQE */
	u16 max_inline;			/* max inline data size */
	u32 max_sqe;
	u32 max_rqe;
	u8 max_qp_resp_rd_atomic_resc;	/* per-QP responder RD/atomic resources */
	u8 max_qp_req_rd_atomic_resc;	/* per-QP requester RD/atomic resources */
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u32 max_srq;
	u32 max_srq_wr;
	u8 max_srq_sge;
	u8 max_stats_queues;
	u32 dev_caps;

	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;	/* async event callbacks from qed */
};
123 | |
#define QEDR_ENET_STATE_BIT (0)

/* Per-adapter context: ties the IB device to the qed core device and
 * holds CNQ/interrupt resources, the doorbell BAR mapping and GSI (QP1)
 * state.
 */
struct qedr_dev {
	struct ib_device ibdev;		/* must be first: IB core container */
	struct qed_dev *cdev;		/* qed core device */
	struct pci_dev *pdev;
	struct net_device *ndev;	/* paired ethernet device */

	enum ib_atomic_cap atomic_cap;

	void *rdma_ctx;			/* opaque context handed to qed RDMA ops */
	struct qedr_device_attr attr;

	const struct qed_rdma_ops *ops;	/* callbacks into the qed core */
	struct qed_int_info int_info;

	struct qed_sb_info *sb_array;
	struct qedr_cnq *cnq_array;
	int num_cnq;
	int sb_start;

	/* Doorbell BAR mapping */
	void __iomem *db_addr;
	u64 db_phys_addr;
	u32 db_size;
	u16 dpi;

	union ib_gid *sgid_tbl;

	/* Lock for sgid table */
	spinlock_t sgid_lock;

	u64 guid;

	u32 dp_module;			/* debug print module mask */
	u8 dp_level;			/* debug print verbosity */
	u8 num_hwfns;
#define QEDR_IS_CMT(dev) ((dev)->num_hwfns > 1)
	u8 affin_hwfn_idx;		/* affinitized HW function index */
	u8 gsi_ll2_handle;

	uint wq_multiplier;
	/* GSI (QP1) state */
	u8 gsi_ll2_mac_address[ETH_ALEN];
	int gsi_qp_created;
	struct qedr_cq *gsi_sqcq;
	struct qedr_cq *gsi_rqcq;
	struct qedr_qp *gsi_qp;
	enum qed_rdma_type rdma_type;	/* iWARP vs RoCE; see IS_IWARP()/IS_ROCE() */
	struct xarray qps;		/* QP lookup table - presumably keyed by QP id; confirm at users */
	struct xarray srqs;		/* SRQ lookup table - presumably keyed by SRQ id; confirm at users */
	struct workqueue_struct *iwarp_wq;	/* iWARP CM work */
	u16 iwarp_max_mtu;

	unsigned long enet_state;	/* bit flags; see QEDR_ENET_STATE_BIT */

	u8 user_dpm_enabled;
};
180 | |
/* SQ sizing: derive the max number of SQ WQEs from the PBL capacity,
 * the ring page size and the worst-case elements per WQE.
 */
#define QEDR_MAX_SQ_PBL			(0x8000)
#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_SQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
/* RQ sizing: analogous derivation for receive WQEs. */
#define QEDR_MAX_RQ_PBL			(0x2000)
#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_RQE_ELEMENT_SIZE)
#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_RQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))

/* CQ sizing: one u64 PBL entry is reserved, hence the "- 1". */
#define QEDR_CQE_SIZE	(sizeof(union rdma_cqe))
#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
				  sizeof(u64)) - 1)
#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
			     (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))

#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

/* Single-port device. */
#define QEDR_MAX_PORT			(1)
#define QEDR_PORT			(1)

#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

/* RoCE exposes a single default pkey. */
#define QEDR_ROCE_PKEY_MAX 1
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff
221 | |
/* One page of a Page Buffer List (PBL), tracked on a free/in-use list. */
struct qedr_pbl {
	struct list_head list_entry;
	void *va;		/* kernel virtual address of the PBL page */
	dma_addr_t pa;		/* DMA address of the same page */
};
227 | |
/* Per-process user context; owns the DPI doorbell window mapped to
 * user space.
 */
struct qedr_ucontext {
	struct ib_ucontext ibucontext;	/* IB core container */
	struct qedr_dev *dev;
	struct qedr_pd *pd;
	void __iomem *dpi_addr;		/* kernel mapping of the DPI window */
	struct rdma_user_mmap_entry *db_mmap_entry;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 dpi;
	bool db_rec;			/* doorbell recovery enabled for this context */
	u8 edpm_mode;
};
240 | |
/* 32-bit doorbell: structured producer value or raw word for writing. */
union db_prod32 {
	struct rdma_pwm_val16_data data;
	u32 raw;
};
245 | |
/* 64-bit doorbell: structured producer value or raw word for writing. */
union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};
250 | |
/* How a CQ was created; stored in qedr_cq.cq_type. */
enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,	/* CQ backing the GSI (QP1) queue pair */
	QEDR_CQ_TYPE_KERNEL,	/* created by a kernel ULP */
	QEDR_CQ_TYPE_USER,	/* created from user space */
};
256 | |
/* Geometry of a PBL allocation. */
struct qedr_pbl_info {
	u32 num_pbls;		/* number of PBL pages */
	u32 num_pbes;		/* number of page buffer entries */
	u32 pbl_size;		/* bytes per PBL page */
	u32 pbe_size;		/* bytes per entry */
	bool two_layered;	/* PBL of PBLs rather than a flat list */
};
264 | |
/* State shared by all user-space-backed queues (SQ/RQ/CQ/SRQ):
 * the pinned user buffer, its PBL mapping and doorbell-recovery data.
 */
struct qedr_userq {
	struct ib_umem *umem;		/* pinned user memory */
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;			/* user virtual address of the queue buffer */
	size_t buf_len;

	/* doorbell recovery */
	void __iomem *db_addr;
	struct qedr_user_db_rec *db_rec_data;
	struct rdma_user_mmap_entry *db_mmap_entry;
	void __iomem *db_rec_db2_addr;	/* secondary doorbell (iWARP) */
	union db_prod32 db_rec_db2_data;
};
279 | |
/* Completion queue context wrapping the IB core CQ. */
struct qedr_cq {
	struct ib_cq ibcq;	/* IB core container; must match get_qedr_cq() */

	enum qedr_cq_type cq_type;
	u32 sig;		/* QEDR_CQ_MAGIC_NUMBER while the CQ is valid */

	u16 icid;		/* HW connection id */

	/* Lock to protect multiple CQ's */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;	/* CQE ring */

	void __iomem *db_addr;	/* CQ doorbell */
	union db_prod64 db;

	u8 pbl_toggle;		/* expected toggle bit for valid CQEs */
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;

	u32 cq_cons;		/* SW consumer index */

	struct qedr_userq q;	/* user-space mapping (user CQs only) */
	u8 destroyed;		/* set once destroy has started */
	u16 cnq_notif;		/* notifications received from the CNQ */
};
306 | |
/* Protection domain wrapper. */
struct qedr_pd {
	struct ib_pd ibpd;		/* IB core container */
	u32 pd_id;			/* HW PD id */
	struct qedr_ucontext *uctx;	/* NULL for kernel PDs */
};
312 | |
/* XRC domain wrapper. */
struct qedr_xrcd {
	struct ib_xrcd ibxrcd;	/* IB core container */
	u16 xrcd_id;		/* HW XRCD id */
};
317 | |
/* HW queue (SQ or RQ) bookkeeping for one direction of a QP. */
struct qedr_qp_hwq_info {
	/* WQE Elements */
	struct qed_chain pbl;		/* WQE element ring */
	u64 p_phys_addr_tbl;		/* physical address of the PBL table */
	u32 max_sges;			/* max SGEs per WQE */

	/* WQE */
	u16 prod;			/* SW producer index */
	u16 cons;			/* SW consumer index */
	u16 wqe_cons;			/* running WQE consumer counter */
	u16 gsi_cons;			/* consumer index for GSI QPs */
	u16 max_wr;			/* queue depth; prod/cons wrap at this */

	/* DB */
	void __iomem *db;		/* doorbell address */
	union db_prod32 db_data;	/* last doorbell value written */

	void __iomem *iwarp_db2;	/* second doorbell used on iWARP */
	union db_prod32 iwarp_db2_data;
};
338 | |
/* Advance the given SW index in @p_info, wrapping via the chain capacity.
 * NOTE(review): the '&' wrap is only correct if qed_chain_get_capacity()
 * yields a power-of-two-minus-one mask for this chain - confirm before
 * adding new users.
 */
#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		p_info->index = (p_info->index + 1) &			\
				qed_chain_get_capacity(p_info->pbl);	\
	} while (0)
344 | |
/* HW queue bookkeeping for a shared receive queue. */
struct qedr_srq_hwq_info {
	u32 max_sges;			/* max SGEs per receive WQE */
	u32 max_wr;			/* queue depth */
	struct qed_chain pbl;		/* SRQ element ring */
	u64 p_phys_addr_tbl;		/* physical address of the PBL table */
	u32 wqe_prod;			/* WQE producer value */
	u32 sge_prod;			/* SGE producer value */
	u32 wr_prod_cnt;		/* posted WRs (producer side) */
	atomic_t wr_cons_cnt;		/* consumed WRs; atomic - updated outside the SRQ lock */
	u32 num_elems;

	/* producer pair shared with HW */
	struct rdma_srq_producers *virt_prod_pair_addr;	/* CPU view */
	dma_addr_t phy_prod_pair_addr;			/* DMA view */
};
359 | |
/* Shared receive queue wrapper. */
struct qedr_srq {
	struct ib_srq ibsrq;		/* IB core container */
	struct qedr_dev *dev;

	struct qedr_userq usrq;		/* user-space mapping (user SRQs only) */
	struct qedr_srq_hwq_info hw_srq;
	struct ib_umem *prod_umem;	/* user memory backing the producer pair */
	u16 srq_id;
	u32 srq_limit;			/* limit-event threshold */
	bool is_xrc;			/* XRC SRQ rather than a basic SRQ */
	/* lock to protect srq recv post */
	spinlock_t lock;
};
373 | |
/* Error flags accumulated in qedr_qp.err_bitmap (bit flags, OR-able). */
enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL = 1,
	QEDR_QP_ERR_RQ_FULL = 2,
	QEDR_QP_ERR_BAD_SR = 4,
	QEDR_QP_ERR_BAD_RR = 8,
	QEDR_QP_ERR_SQ_PBL_FULL = 16,
	QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
382 | |
/* Where a QP was created from; stored in qedr_qp.create_type. */
enum qedr_qp_create_type {
	QEDR_QP_CREATE_NONE,	/* not yet created */
	QEDR_QP_CREATE_USER,	/* created from user space */
	QEDR_QP_CREATE_KERNEL,	/* created by a kernel ULP */
};
388 | |
/* Bits used in qedr_qp.iwarp_cm_flags to gate CM event completion. */
enum qedr_iwarp_cm_flags {
	QEDR_IWARP_CM_WAIT_FOR_CONNECT    = BIT(0),
	QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
};
393 | |
/* Queue pair context: HW queue state, CQ/PD/SRQ links, and shadow
 * arrays for kernel QPs plus userq mappings for user QPs.
 */
struct qedr_qp {
	struct ib_qp ibqp;	/* must be first */
	struct qedr_dev *dev;
	struct qedr_qp_hwq_info sq;
	struct qedr_qp_hwq_info rq;

	u32 max_inline_data;

	/* Lock for QP's */
	spinlock_t q_lock;
	struct qedr_cq *sq_cq;
	struct qedr_cq *rq_cq;
	struct qedr_srq *srq;		/* NULL unless attached to an SRQ */
	enum qed_roce_qp_state state;
	u32 id;
	struct qedr_pd *pd;
	enum ib_qp_type qp_type;
	enum qedr_qp_create_type create_type;
	struct qed_rdma_qp *qed_qp;	/* qed core QP handle */
	u32 qp_id;
	u16 icid;			/* HW connection id */
	u16 mtu;
	int sgid_idx;
	u32 rq_psn;
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;
	u8 timeout;

	/* Relevant to qps created from kernel space only (ULPs) */
	u8 prev_wqe_size;
	u16 wqe_cons;
	u32 err_bitmap;			/* OR of enum qedr_qp_err_bitmap flags */
	bool signaled;			/* QP-level: all SQ WQEs generate CQEs */

	/* SQ shadow: one entry per posted send WR, indexed like sq.prod/cons */
	struct {
		u64 wr_id;
		enum ib_wc_opcode opcode;
		u32 bytes_len;
		u8 wqe_size;
		bool signaled;		/* per-WR completion requested */
		dma_addr_t icrc_mapping;
		u32 *icrc;
		struct qedr_mr *mr;
	} *wqe_wr_id;

	/* RQ shadow: one entry per posted receive WR */
	struct {
		u64 wr_id;
		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
		u8 wqe_size;

		u8 smac[ETH_ALEN];	/* source MAC (GSI receive) */
		u16 vlan;
		int rc;
	} *rqe_wr_id;

	/* Relevant to qps created from user space only (applications) */
	struct qedr_userq usq;
	struct qedr_userq urq;

	/* synchronization objects used with iwarp ep */
	struct kref refcnt;
	struct completion iwarp_cm_comp;
	struct completion qp_rel_comp;
	unsigned long iwarp_cm_flags; /* enum qedr_iwarp_cm_flags */
};
462 | |
/* Address handle: the attributes are kept verbatim for later use. */
struct qedr_ah {
	struct ib_ah ibah;		/* IB core container */
	struct rdma_ah_attr attr;	/* copy of the creation attributes */
};
467 | |
/* Kind of memory region; stored in qedr_mr.type. */
enum qedr_mr_type {
	QEDR_MR_USER,	/* user memory region */
	QEDR_MR_KERNEL,	/* kernel memory region */
	QEDR_MR_DMA,	/* DMA MR covering all of memory */
	QEDR_MR_FRMR,	/* fast-registration MR */
};
474 | |
/* PBL bookkeeping shared by MR variants. */
struct mr_info {
	struct qedr_pbl *pbl_table;
	struct qedr_pbl_info pbl_info;
	struct list_head free_pbl_list;		/* PBL pages available for reuse */
	struct list_head inuse_pbl_list;	/* PBL pages currently registered */
	u32 completed;
	u32 completed_handled;
};
483 | |
/* Memory region wrapper. */
struct qedr_mr {
	struct ib_mr ibmr;		/* IB core container */
	struct ib_umem *umem;		/* pinned user memory (user MRs only) */

	struct qed_rdma_register_tid_in_params hw_mr;	/* HW registration params */
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;		/* PBL state */

	/* page list for FRMR map_mr_sg */
	u64 *pages;
	u32 npages;
};
497 | |
/* One user-mmap-able region (doorbell window or recovery buffer). */
struct qedr_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* IB core container */
	struct qedr_dev *dev;
	union {
		u64 io_address;		/* physical/IO address for IO mappings */
		void *address;		/* kernel virtual address otherwise */
	};
	size_t length;
	u16 dpi;
	u8 mmap_flag;			/* selects which union member is valid */
};
509 | |
/* OR @flag into @value at the position given by name##_SHIFT. */
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))

/* Responder CQE flag masks, pre-shifted for direct testing. */
#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QEDR_RESP_INV	(RDMA_CQE_RESPONDER_INV_FLG_MASK << \
			 RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
518 | |
519 | static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info) |
520 | { |
521 | info->cons = (info->cons + 1) % info->max_wr; |
522 | info->wqe_cons++; |
523 | } |
524 | |
525 | static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info) |
526 | { |
527 | info->prod = (info->prod + 1) % info->max_wr; |
528 | } |
529 | |
530 | static inline int qedr_get_dmac(struct qedr_dev *dev, |
531 | struct rdma_ah_attr *ah_attr, u8 *mac_addr) |
532 | { |
533 | union ib_gid zero_sgid = { { 0 } }; |
534 | struct in6_addr in6; |
535 | const struct ib_global_route *grh = rdma_ah_read_grh(attr: ah_attr); |
536 | u8 *dmac; |
537 | |
538 | if (!memcmp(p: &grh->dgid, q: &zero_sgid, size: sizeof(union ib_gid))) { |
539 | DP_ERR(dev, "Local port GID not supported\n" ); |
540 | eth_zero_addr(addr: mac_addr); |
541 | return -EINVAL; |
542 | } |
543 | |
544 | memcpy(&in6, grh->dgid.raw, sizeof(in6)); |
545 | dmac = rdma_ah_retrieve_dmac(attr: ah_attr); |
546 | if (!dmac) |
547 | return -EINVAL; |
548 | ether_addr_copy(dst: mac_addr, src: dmac); |
549 | |
550 | return 0; |
551 | } |
552 | |
/* iWARP connection-manager listener context. */
struct qedr_iw_listener {
	struct qedr_dev *dev;
	struct iw_cm_id *cm_id;	/* IW CM identifier for this listener */
	int backlog;		/* pending-connection backlog */
	void *qed_handle;	/* opaque handle returned by the qed core */
};
559 | |
/* iWARP connection endpoint, refcounted across CM events. */
struct qedr_iw_ep {
	struct qedr_dev *dev;
	struct iw_cm_id *cm_id;	/* IW CM identifier for this connection */
	struct qedr_qp *qp;	/* QP bound to the connection */
	void *qed_context;	/* opaque context from the qed core */
	struct kref refcnt;
};
567 | |
/* Convert an IB core ucontext pointer to the qedr wrapper. */
static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct qedr_ucontext, ibucontext);
}
573 | |
/* Convert an IB core device pointer to the qedr device. */
static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qedr_dev, ibdev);
}
578 | |
/* Convert an IB core PD pointer to the qedr wrapper. */
static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qedr_pd, ibpd);
}
583 | |
/* Convert an IB core XRCD pointer to the qedr wrapper. */
static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct qedr_xrcd, ibxrcd);
}
588 | |
/* Convert an IB core CQ pointer to the qedr wrapper. */
static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qedr_cq, ibcq);
}
593 | |
/* Convert an IB core QP pointer to the qedr wrapper. */
static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qedr_qp, ibqp);
}
598 | |
/* Convert an IB core AH pointer to the qedr wrapper. */
static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qedr_ah, ibah);
}
603 | |
/* Convert an IB core MR pointer to the qedr wrapper. */
static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qedr_mr, ibmr);
}
608 | |
/* Convert an IB core SRQ pointer to the qedr wrapper. */
static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qedr_srq, ibsrq);
}
613 | |
614 | static inline bool qedr_qp_has_srq(struct qedr_qp *qp) |
615 | { |
616 | return qp->srq; |
617 | } |
618 | |
619 | static inline bool qedr_qp_has_sq(struct qedr_qp *qp) |
620 | { |
621 | if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT) |
622 | return false; |
623 | |
624 | return true; |
625 | } |
626 | |
627 | static inline bool qedr_qp_has_rq(struct qedr_qp *qp) |
628 | { |
629 | if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI || |
630 | qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp)) |
631 | return false; |
632 | |
633 | return true; |
634 | } |
635 | |
/* Convert an IB core mmap entry pointer to the qedr wrapper. */
static inline struct qedr_user_mmap_entry *
get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct qedr_user_mmap_entry,
			    rdma_entry);
}
642 | #endif |
643 | |