1 | /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #ifndef _QED_RDMA_IF_H |
8 | #define _QED_RDMA_IF_H |
9 | #include <linux/types.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/list.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/qed/qed_if.h> |
14 | #include <linux/qed/qed_ll2_if.h> |
15 | #include <linux/qed/rdma_common.h> |
16 | |
/* Maximum number of entries in a CNQ (completion notification queue) */
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
18 | |
19 | /* rdma interface */ |
20 | |
/* RoCE QP states, mirroring the InfiniBand QP state machine. */
enum qed_roce_qp_state {
	QED_ROCE_QP_STATE_RESET,
	QED_ROCE_QP_STATE_INIT,
	QED_ROCE_QP_STATE_RTR,	/* Ready To Receive */
	QED_ROCE_QP_STATE_RTS,	/* Ready To Send */
	QED_ROCE_QP_STATE_SQD,	/* Send Queue Drained */
	QED_ROCE_QP_STATE_ERR,	/* Error */
	QED_ROCE_QP_STATE_SQE	/* Send Queue Error */
};
30 | |
/* Transport service type of an RDMA QP. */
enum qed_rdma_qp_type {
	QED_RDMA_QP_TYPE_RC,		/* Reliable Connected */
	QED_RDMA_QP_TYPE_XRC_INI,	/* XRC initiator side */
	QED_RDMA_QP_TYPE_XRC_TGT,	/* XRC target side */
	QED_RDMA_QP_TYPE_INVAL = 0xffff,
};
37 | |
/* Kind of memory registration a TID describes
 * (see qed_rdma_register_tid_in_params).
 */
enum qed_rdma_tid_type {
	QED_RDMA_TID_REGISTERED_MR,	/* ordinary registered memory region */
	QED_RDMA_TID_FMR,		/* fast memory registration */
	QED_RDMA_TID_MW			/* memory window */
};
43 | |
/* Asynchronous event callbacks registered by the RDMA upper layer.
 * @context is passed back verbatim as the first argument of both handlers.
 */
struct qed_rdma_events {
	void *context;
	/* Event tied to a specific firmware object (identified by fw_handle) */
	void (*affiliated_event)(void *context, u8 fw_event_code,
				 void *fw_handle);
	/* Device-global event, not tied to a particular object */
	void (*unaffiliated_event)(void *context, u8 event_code);
};
50 | |
/* Device attributes, limits and capability bits reported through
 * rdma_query_device() (see struct qed_rdma_ops).
 */
struct qed_rdma_device {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;

	u64 node_guid;
	u64 sys_image_guid;

	u8 max_cnq;
	u8 max_sge;		/* max scatter/gather entries per WQE */
	u8 max_srq_sge;		/* max scatter/gather entries per SRQ WQE */
	u16 max_inline;
	u32 max_wqe;
	u32 max_srq_wqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_srq;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u16 max_srq_wr;
	u8 max_stats_queues;
	u32 dev_caps;	/* bit-field built from the MASK/SHIFT pairs below */

	/* Ability to support RNR-NAK generation */

#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
/* Ability to support a bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
/* Ability to support CQ resizing */
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
/* Ability to support modifying the maximum number of
 * outstanding work requests per QP
 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
/* Ability to support block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
/* Ability to support zero based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
/* Ability to support Loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};
143 | |
/* Link state of the RDMA port as reported by rdma_query_port(). */
enum qed_port_state {
	QED_RDMA_PORT_UP,
	QED_RDMA_PORT_DOWN,
};
148 | |
/* Supported RoCE protocol versions (bit flags, may be OR-ed). */
enum qed_roce_capability {
	QED_ROCE_V1 = 1 << 0,
	QED_ROCE_V2 = 1 << 1,
};
153 | |
/* Per-port attributes returned by rdma_query_port(). */
struct qed_rdma_port {
	enum qed_port_state port_state;
	int link_speed;
	u64 max_msg_size;
	u8 source_gid_table_len;
	void *source_gid_table_ptr;
	u8 pkey_table_len;
	void *pkey_table_ptr;
	u32 pkey_bad_counter;
	enum qed_roce_capability capability;	/* RoCE versions supported */
};
165 | |
/* Physical buffer list describing one CNQ's memory. */
struct qed_rdma_cnq_params {
	u8 num_pbl_pages;	/* number of pages in the PBL */
	u64 pbl_ptr;		/* physical address of the PBL */
};
170 | |
/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum qed_rdma_cq_mode {
	QED_RDMA_CQ_MODE_16_BITS,
	QED_RDMA_CQ_MODE_32_BITS,
};
178 | |
/* DCQCN (congestion control) configuration. A device may act as
 * notification point (CNP sender), reaction point (rate limiter), or both.
 */
struct qed_roce_dcqcn_params {
	u8 notification_point;
	u8 reaction_point;

	/* fields for notification point */
	u32 cnp_send_timeout;

	/* fields for reaction point */
	u32 rl_bc_rate;		/* byte counter limit */
	u16 rl_max_rate;	/* maximum rate in Mbps */
	u16 rl_r_ai;		/* active increase rate */
	u16 rl_r_hai;		/* hyper active increase rate */
	u16 dcqcn_g;		/* alpha update gain */
	u32 dcqcn_k_us;		/* alpha update interval */
	u32 dcqcn_timeout_us;
};
195 | |
/* Input parameters for rdma_init() (RDMA engine start-up). */
struct qed_rdma_start_in_params {
	struct qed_rdma_events *events;		/* async event callbacks */
	struct qed_rdma_cnq_params cnq_pbl_list[128];
	u8 desired_cnq;				/* number of CNQs requested */
	enum qed_rdma_cq_mode cq_mode;
	struct qed_roce_dcqcn_params dcqcn_params;
	u16 max_mtu;
	u8 mac_addr[ETH_ALEN];
	u8 iwarp_flags;
};
206 | |
/* Doorbell page info (DPI) returned by rdma_add_user(). */
struct qed_rdma_add_user_out_params {
	u16 dpi;			/* doorbell page index */
	void __iomem *dpi_addr;		/* mapped doorbell page */
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 wid_count;
};
214 | |
/* RoCE flavor of a QP / packet: v1 (Ethernet) or v2 over IPv4/IPv6. */
enum roce_mode {
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};
221 | |
/* 128-bit GID, accessible at several granularities; for RoCEv2/IPv4
 * only the ipv4_addr member is meaningful.
 */
union qed_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};
229 | |
/* Input parameters for rdma_register_tid() (memory registration). */
struct qed_rdma_register_tid_in_params {
	u32 itid;			/* TID previously given by rdma_alloc_tid() */
	enum qed_rdma_tid_type tid_type;
	u8 key;
	u16 pd;				/* protection domain */
	/* access permissions */
	bool local_read;
	bool local_write;
	bool remote_read;
	bool remote_write;
	bool remote_atomic;
	bool mw_bind;
	u64 pbl_ptr;			/* physical buffer list address */
	bool pbl_two_level;
	u8 pbl_page_size_log;		/* log2 of PBL page size */
	u8 page_size_log;		/* log2 of region page size */
	u64 length;
	u64 vaddr;
	bool phy_mr;
	bool dma_mr;

	bool dif_enabled;		/* T10-DIF protection enabled */
	u64 dif_error_addr;
};
253 | |
/* Input parameters for rdma_create_cq(). */
struct qed_rdma_create_cq_in_params {
	/* opaque 64-bit handle, split in two, echoed back on completion events */
	u32 cq_handle_lo;
	u32 cq_handle_hi;
	u32 cq_size;
	u16 dpi;
	bool pbl_two_level;
	u64 pbl_ptr;
	u16 pbl_num_pages;
	u8 pbl_page_size_log;	/* log2 of PBL page size */
	u8 cnq_id;		/* CNQ that will receive this CQ's notifications */
	u16 int_timeout;
};
266 | |
/* Input parameters for rdma_create_srq(). */
struct qed_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool reserved_key_en;
	bool is_xrc;
	u32 cq_cid;
	u16 xrcd_id;
};
280 | |
/* Input parameters for rdma_destroy_cq(). */
struct qed_rdma_destroy_cq_in_params {
	u16 icid;	/* connection id of the CQ to destroy */
};
284 | |
/* Output of rdma_destroy_cq(). */
struct qed_rdma_destroy_cq_out_params {
	u16 num_cq_notif;	/* notifications delivered during CQ lifetime */
};
288 | |
/* Input parameters for rdma_create_qp(). */
struct qed_rdma_create_qp_in_params {
	/* opaque 64-bit handles, split in two, echoed back on async events */
	u32 qp_handle_lo;
	u32 qp_handle_hi;
	u32 qp_handle_async_lo;
	u32 qp_handle_async_hi;
	bool use_srq;
	bool signal_all;	/* generate a CQE for every SQ WQE */
	bool fmr_and_reserved_lkey;
	u16 pd;
	u16 dpi;
	u16 sq_cq_id;
	u16 sq_num_pages;
	u64 sq_pbl_ptr;
	u8 max_sq_sges;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u64 rq_pbl_ptr;
	u16 srq_id;
	u16 xrcd_id;
	u8 stats_queue;
	enum qed_rdma_qp_type qp_type;
	u8 flags;
#define QED_ROCE_EDPM_MODE_MASK 0x1
#define QED_ROCE_EDPM_MODE_SHIFT 0
};
314 | |
/* Output of rdma_create_qp(). */
struct qed_rdma_create_qp_out_params {
	u32 qp_id;
	u16 icid;		/* connection id used by subsequent QP calls */
	void *rq_pbl_virt;
	dma_addr_t rq_pbl_phys;
	void *sq_pbl_virt;
	dma_addr_t sq_pbl_phys;
};
323 | |
/* Input parameters for rdma_modify_qp(). Each field below is only
 * consumed when the corresponding VALID bit is set in modify_flags.
 */
struct qed_rdma_modify_qp_in_params {
	u32 modify_flags;	/* bit-field built from the MASK/SHIFT pairs below */
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14

	enum qed_roce_qp_state new_state;
	u16 pkey;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	u32 dest_qp;
	bool lb_indication;
	u16 mtu;
	u8 traffic_class_tos;	/* IPv6 traffic class / IPv4 TOS */
	u8 hop_limit_ttl;	/* IPv6 hop limit / IPv4 TTL */
	u32 flow_label;
	union qed_gid sgid;
	union qed_gid dgid;
	u16 udp_src_port;	/* RoCEv2 UDP source port */

	u16 vlan_id;

	u32 rq_psn;
	u32 sq_psn;
	u8 max_rd_atomic_resp;
	u8 max_rd_atomic_req;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool use_local_mac;
	enum roce_mode roce_mode;
};
389 | |
/* Output of rdma_query_qp(): current QP attributes. */
struct qed_rdma_query_qp_out_params {
	enum qed_roce_qp_state state;
	u32 rq_psn;
	u32 sq_psn;
	bool draining;		/* SQ drain in progress */
	u16 mtu;
	u32 dest_qp;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	union qed_gid sgid;
	union qed_gid dgid;
	u32 flow_label;
	u8 hop_limit_ttl;
	u8 traffic_class_tos;
	u32 timeout;
	u8 rnr_retry;
	u8 retry_cnt;
	u8 min_rnr_nak_timer;
	u16 pkey_index;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	bool sqd_async;
};
415 | |
/* Output of rdma_create_srq(). */
struct qed_rdma_create_srq_out_params {
	u16 srq_id;
};
419 | |
/* Input parameters for rdma_destroy_srq(). */
struct qed_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;	/* true for an XRC SRQ */
};
424 | |
/* Input parameters for rdma_modify_srq(). */
struct qed_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;	/* true for an XRC SRQ */
};
430 | |
/* RDMA traffic statistics. */
struct qed_rdma_stats_out_params {
	u64 sent_bytes;
	u64 sent_pkts;
	u64 rcv_bytes;
	u64 rcv_pkts;
};
437 | |
/* Current usage vs. limits of RDMA resources. */
struct qed_rdma_counters_out_params {
	u64 pd_count;
	u64 max_pd;
	u64 dpi_count;
	u64 max_dpi;
	u64 cq_count;
	u64 max_cq;
	u64 qp_count;
	u64 max_qp;
	u64 tid_count;
	u64 max_tid;
};
450 | |
/* RoCE LL2 TX failure codes (NOTE(review): presumably header vs. payload
 * fragment send failure — confirm against the LL2 TX path).
 */
#define QED_ROCE_TX_HEAD_FAILURE (1)
#define QED_ROCE_TX_FRAG_FAILURE (2)
453 | |
/* Events reported to the iwarp_event_handler callback. */
enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED,
	QED_IWARP_EVENT_SRQ_LIMIT,
	QED_IWARP_EVENT_SRQ_EMPTY,
};
473 | |
/* IP version of an iWARP TCP connection. */
enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};
478 | |
/* Connection-manager 5-tuple and MPA parameters of an iWARP connection. */
struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];	/* only [0] used for IPv4 */
	u32 local_ip[4];	/* only [0] used for IPv4 */
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord;			/* outbound read depth */
	u8 ird;			/* inbound read depth */
	u16 private_data_len;
	const void *private_data;	/* MPA private data, not owned */
};
491 | |
/* Parameters delivered with each iWARP CM event. */
struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};
498 | |
/* Callback invoked on iWARP CM events; @context is the cb_context the
 * caller registered at connect/listen time.
 */
typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);
501 | |
/* Input parameters for iwarp_connect() (active side). */
struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;	/* passed to event_cb */
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};
511 | |
/* Output of iwarp_connect(): endpoint handle for later CM calls. */
struct qed_iwarp_connect_out {
	void *ep_context;
};
515 | |
/* Input parameters for iwarp_create_listen() (passive side). */
struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb;
	void *cb_context; /* passed to event_cb */
	u32 max_backlog;
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4];		/* only [0] used for IPv4 */
	u16 port;
	u16 vlan;
};
525 | |
/* Output of iwarp_create_listen(): handle for iwarp_destroy_listen(). */
struct qed_iwarp_listen_out {
	void *handle;
};
529 | |
/* Input parameters for iwarp_accept(). */
struct qed_iwarp_accept_in {
	void *ep_context;	/* from the MPA_REQUEST event */
	void *cb_context;
	struct qed_rdma_qp *qp;
	const void *private_data;	/* MPA reply private data */
	u16 private_data_len;
	u8 ord;
	u8 ird;
};
539 | |
/* Input parameters for iwarp_reject(). */
struct qed_iwarp_reject_in {
	void *ep_context;	/* from the MPA_REQUEST event */
	void *cb_context;
	const void *private_data;	/* MPA reject private data */
	u16 private_data_len;
};
546 | |
/* Input parameters for iwarp_send_rtr(). */
struct qed_iwarp_send_rtr_in {
	void *ep_context;
};
550 | |
551 | struct { |
552 | void *; |
553 | dma_addr_t ; |
554 | size_t ; |
555 | }; |
556 | |
/* One payload fragment of an LL2 RoCE packet. */
struct qed_roce_ll2_buffer {
	dma_addr_t baddr;	/* DMA (bus) address of the fragment */
	size_t len;		/* fragment length in bytes */
};
561 | |
562 | struct qed_roce_ll2_packet { |
563 | struct qed_roce_ll2_header ; |
564 | int n_seg; |
565 | struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; |
566 | int roce_mode; |
567 | enum qed_ll2_tx_dest tx_dest; |
568 | }; |
569 | |
/* RDMA transport the device is personalized for. */
enum qed_rdma_type {
	QED_RDMA_TYPE_ROCE,
	QED_RDMA_TYPE_IWARP
};
574 | |
/* Device info returned by fill_dev_info(), extending the common info. */
struct qed_dev_rdma_info {
	struct qed_dev_info common;
	enum qed_rdma_type rdma_type;
	u8 user_dpm_enabled;	/* user-space doorbell push mode enabled */
};
580 | |
/* Operations table exported by the qed core to RDMA upper-layer drivers.
 * Unless noted otherwise, rdma_cxt is the opaque context obtained from
 * rdma_get_rdma_ctx() and int returns follow the 0 / negative-errno
 * convention used throughout qed.
 */
struct qed_rdma_ops {
	const struct qed_common_ops *common;

	/* Device discovery and context */
	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_rdma_info *info);
	void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

	/* Engine start/stop and per-user doorbell pages */
	int (*rdma_init)(struct qed_dev *dev,
			 struct qed_rdma_start_in_params *iparams);

	int (*rdma_add_user)(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *oparams);

	void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
	int (*rdma_stop)(void *rdma_cxt);
	struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
	struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
	int (*rdma_get_start_sb)(struct qed_dev *cdev);
	int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
	void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
	int (*rdma_get_rdma_int)(struct qed_dev *cdev,
				 struct qed_int_info *info);
	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);

	/* Protection domains and XRC domains */
	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
	int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
	void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);

	/* Completion queues */
	int (*rdma_create_cq)(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid);
	int (*rdma_destroy_cq)(void *rdma_cxt,
			       struct qed_rdma_destroy_cq_in_params *iparams,
			       struct qed_rdma_destroy_cq_out_params *oparams);

	/* Queue pairs */
	struct qed_rdma_qp *
	(*rdma_create_qp)(void *rdma_cxt,
			  struct qed_rdma_create_qp_in_params *iparams,
			  struct qed_rdma_create_qp_out_params *oparams);

	int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *iparams);

	int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *oparams);
	int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);

	/* Memory registration (TIDs) */
	int
	(*rdma_register_tid)(void *rdma_cxt,
			     struct qed_rdma_register_tid_in_params *iparams);

	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);

	/* Shared receive queues */
	int (*rdma_create_srq)(void *rdma_cxt,
			       struct qed_rdma_create_srq_in_params *iparams,
			       struct qed_rdma_create_srq_out_params *oparams);
	int (*rdma_destroy_srq)(void *rdma_cxt,
				struct qed_rdma_destroy_srq_in_params *iparams);
	int (*rdma_modify_srq)(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *iparams);

	/* Light L2 (LL2) connections used for RoCE CM traffic */
	int (*ll2_acquire_connection)(void *rdma_cxt,
				      struct qed_ll2_acquire_data *data);

	int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
	int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
	void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);

	int (*ll2_prepare_tx_packet)(void *rdma_cxt,
				     u8 connection_handle,
				     struct qed_ll2_tx_pkt_info *pkt,
				     bool notify_fw);

	int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
					     u8 connection_handle,
					     dma_addr_t addr,
					     u16 nbytes);
	int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
				  dma_addr_t addr, u16 buf_len, void *cookie,
				  u8 notify_fw);
	int (*ll2_get_stats)(void *rdma_cxt,
			     u8 connection_handle,
			     struct qed_ll2_stats *p_stats);
	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
				  u8 *old_mac_address,
				  const u8 *new_mac_address);

	/* iWARP connection management */
	int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);

	int (*iwarp_connect)(void *rdma_cxt,
			     struct qed_iwarp_connect_in *iparams,
			     struct qed_iwarp_connect_out *oparams);

	int (*iwarp_create_listen)(void *rdma_cxt,
				   struct qed_iwarp_listen_in *iparams,
				   struct qed_iwarp_listen_out *oparams);

	int (*iwarp_accept)(void *rdma_cxt,
			    struct qed_iwarp_accept_in *iparams);

	int (*iwarp_reject)(void *rdma_cxt,
			    struct qed_iwarp_reject_in *iparams);

	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);

	int (*iwarp_send_rtr)(void *rdma_cxt,
			      struct qed_iwarp_send_rtr_in *iparams);
};
689 | |
/* Obtain the qed RDMA operations table. */
const struct qed_rdma_ops *qed_get_rdma_ops(void);
691 | |
692 | #endif |
693 | |