/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/kref.h>			/* struct kref */
#include <linux/workqueue.h>		/* struct work_struct */
#include <linux/llist.h>

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma_cid.h>	/* completion IDs */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * RDMA Endpoint -- connection endpoint details
 */
struct rpcrdma_mr;
struct rpcrdma_ep {
	struct kref		re_kref;
	struct rdma_cm_id	*re_id;
	struct ib_pd		*re_pd;
	unsigned int		re_max_rdma_segs;
	unsigned int		re_max_fr_depth;
	struct rpcrdma_mr	*re_write_pad_mr;
	enum ib_mr_type		re_mrtype;
	struct completion	re_done;
	unsigned int		re_send_count;
	unsigned int		re_send_batch;
	unsigned int		re_max_inline_send;
	unsigned int		re_max_inline_recv;
	int			re_async_rc;
	int			re_connect_status;
	atomic_t		re_receiving;
	atomic_t		re_force_disconnect;
	struct ib_qp_init_attr	re_attr;
	wait_queue_head_t	re_connect_wait;
	struct rpc_xprt		*re_xprt;
	struct rpcrdma_connect_private
				re_cm_private;
	struct rdma_conn_param	re_remote_cma;
	int			re_receive_count;
	unsigned int		re_max_requests;	/* depends on device */
	unsigned int		re_inline_send;		/* negotiated */
	unsigned int		re_inline_recv;		/* negotiated */

	atomic_t		re_completion_ids;

	char			re_write_pad[XDR_UNIT];
};

/* Pre-allocate extra Work Requests for handling reverse-direction
 * Receives and Sends. This is a fixed value because the Work Queues
 * are allocated when the forward channel is set up, long before the
 * backchannel is provisioned. This value is two times
 * NFS4_DEF_CB_SLOT_TABLE_SIZE.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS (32)
#else
#define RPCRDMA_BACKWARD_WRS (0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 */

struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	void			*rg_data;
};

static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device;
}

static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
{
	return rb->rg_data;
}
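
/* Illustrative sketch (not part of the API): a regbuf's rg_iov is what
 * typically becomes the SGE of a Send or Receive WR, built from the
 * accessors above, e.g.:
 *
 *	struct ib_sge sge = {
 *		.addr	= rdmab_addr(rb),
 *		.length	= rdmab_length(rb),
 *		.lkey	= rdmab_lkey(rb),
 *	};
 *
 * The buffer must already be DMA mapped (see rpcrdma_regbuf_dma_map
 * below) before such an SGE is posted.
 */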

/* Do not use emergency memory reserves, and fail quickly if memory
 * cannot be allocated easily. These flags may be used wherever there
 * is robust logic to handle a failure to allocate.
 */
#define XPRTRDMA_GFP_FLAGS  (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 16. This prevents less-capable devices from
 * overrunning the Send buffer while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 16 read segments means the
 * chunk lists cannot consume more than
 *
 * ((16 + 2) * read segment size) + 1 XDR words,
 *
 * or about 400 bytes. The fixed part of the header is
 * another 24 bytes. Thus when the inline threshold is
 * 1024 bytes, at least 600 bytes are available for RPC
 * message bodies.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 16,
};
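
/* A rough worked example of the figures above, assuming a 20-byte read
 * segment (4-byte position, 4-byte handle, 4-byte length, 8-byte offset):
 * (16 + 2) * 20 + 4 = 364 bytes, or roughly 400 bytes of chunk lists.
 * Adding the ~24-byte fixed header, a 1024-byte inline threshold leaves
 * about 1024 - 424 = 600 bytes for the RPC message itself.
 */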

/*
 * struct rpcrdma_rep -- this structure encapsulates state required
 * to receive and complete an RPC Reply, asynchronously. It needs
 * several pieces of state:
 *
 *   o receive buffer and ib_sge (donated to provider)
 *   o status of receive (success or not, length, inv rkey)
 *   o bookkeeping state to get run by reply handler (XDR stream)
 *
 * These structures are allocated during transport initialization.
 * N of these are associated with a transport instance, managed by
 * struct rpcrdma_buffer. N is the max number of outstanding RPCs.
 */

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	struct rpc_rdma_cid	rr_cid;

	__be32			rr_xid;
	__be32			rr_vers;
	__be32			rr_proc;
	int			rr_wc_flags;
	u32			rr_inv_rkey;
	bool			rr_temp;
	struct rpcrdma_regbuf	*rr_rdmabuf;
	struct rpcrdma_xprt	*rr_rxprt;
	struct rpc_rqst		*rr_rqst;
	struct xdr_buf		rr_hdrbuf;
	struct xdr_stream	rr_stream;
	struct llist_node	rr_node;
	struct ib_recv_wr	rr_recv_wr;
	struct list_head	rr_all;
};

/* To reduce the rate at which a transport invokes ib_post_recv
 * (and thus the hardware doorbell rate), xprtrdma posts Receive
 * WRs in batches.
 *
 * Setting this to zero disables Receive post batching.
 */
enum {
	RPCRDMA_MAX_RECV_BATCH = 7,
};
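
/* Batching works by chaining Receive WRs and ringing the doorbell once
 * for the whole chain. A minimal sketch of the idea (hypothetical code,
 * not the actual logic in verbs.c):
 *
 *	struct ib_recv_wr *first = NULL;
 *	const struct ib_recv_wr *bad_wr;
 *	struct rpcrdma_rep *rep;
 *
 *	while (needed--) {
 *		rep = rpcrdma_rep_get();	// hypothetical helper
 *		rep->rr_recv_wr.next = first;
 *		first = &rep->rr_recv_wr;
 *	}
 *	ib_post_recv(ep->re_id->qp, first, &bad_wr);
 */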

/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
 */
struct rpcrdma_req;
struct rpcrdma_sendctx {
	struct ib_cqe		sc_cqe;
	struct rpc_rdma_cid	sc_cid;
	struct rpcrdma_req	*sc_req;
	unsigned int		sc_unmap_count;
	struct ib_sge		sc_sges[];
};

/*
 * struct rpcrdma_mr - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 */
struct rpcrdma_req;
struct rpcrdma_mr {
	struct list_head	mr_list;
	struct rpcrdma_req	*mr_req;

	struct ib_mr		*mr_ibmr;
	struct ib_device	*mr_device;
	struct scatterlist	*mr_sg;
	int			mr_nents;
	enum dma_data_direction	mr_dir;
	struct ib_cqe		mr_cqe;
	struct completion	mr_linv_done;
	union {
		struct ib_reg_wr	mr_regwr;
		struct ib_send_wr	mr_invwr;
	};
	struct rpcrdma_xprt	*mr_xprt;
	u32			mr_handle;
	u32			mr_length;
	u64			mr_offset;
	struct list_head	mr_all;
	struct rpc_rdma_cid	mr_cid;
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};

/* Arguments for DMA mapping and registration */
struct rpcrdma_mr_seg {
	u32		mr_len;		/* length of segment */
	struct page	*mr_page;	/* underlying struct page */
	u64		mr_offset;	/* IN: page offset, OUT: iova */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES	= 3,
	RPCRDMA_MAX_PAGE_SGES	= RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES	= 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};
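
/* For example, assuming 4KB pages and a 64KB RPCRDMA_MAX_INLINE ceiling,
 * this works out to 1 (transport header) + 1 (head iovec) + 16 (page
 * list) + 1 (tail iovec) = 19 Send SGEs at most per RPC.
 */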

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_list;
	struct rpc_rqst		rl_slot;
	struct rpcrdma_rep	*rl_reply;
	struct xdr_stream	rl_stream;
	struct xdr_buf		rl_hdrbuf;
	struct ib_send_wr	rl_wr;
	struct rpcrdma_sendctx	*rl_sendctx;
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct list_head	rl_all;
	struct kref		rl_kref;

	struct list_head	rl_free_mrs;
	struct list_head	rl_registered;
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}

static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}

static inline struct rpcrdma_mr *
rpcrdma_mr_pop(struct list_head *list)
{
	struct rpcrdma_mr *mr;

	mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
	if (mr)
		list_del_init(&mr->mr_list);
	return mr;
}
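
/* Illustrative usage sketch (hypothetical caller): MRs move between
 * per-request lists via these helpers, for example returning every
 * registered MR on a request to its free list:
 *
 *	struct rpcrdma_mr *mr;
 *
 *	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
 *		rpcrdma_mr_push(mr, &req->rl_free_mrs);
 *
 * Neither helper takes a lock; callers serialize access to the lists.
 */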

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_lock;
	struct list_head	rb_send_bufs;
	struct list_head	rb_mrs;

	unsigned long		rb_sc_head;
	unsigned long		rb_sc_tail;
	unsigned long		rb_sc_last;
	struct rpcrdma_sendctx	**rb_sc_ctxs;

	struct list_head	rb_allreqs;
	struct list_head	rb_all_mrs;
	struct list_head	rb_all_reps;

	struct llist_head	rb_free_reps;

	__be32			rb_max_requests;
	u32			rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	u32			rb_bc_max_requests;

	struct work_struct	rb_refresh_worker;
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	/* accessed when sending a call */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;
	unsigned long long	total_rdma_request;

	/* rarely accessed error counters */
	unsigned long long	pullup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recycled;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;

	/* accessed when receiving a reply */
	unsigned long long	total_rdma_reply;
	unsigned long long	fixup_copy_count;
	unsigned long		reply_waits_for_send;
	unsigned long		local_inv_needed;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ep	*rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct delayed_work	rx_connect_worker;
	struct rpc_timeout	rx_timeout;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)

static inline const char *
rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
}

static inline const char *
rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
}

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/* This setting controls the hunt for a supported memory
 * registration strategy.
 */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
void rpcrdma_force_disconnect(struct rpcrdma_ep *ep);
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);

void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
				       size_t size);
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
			struct rpcrdma_req *req);
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);

bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
			    gfp_t flags);
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb);
/**
 * rpcrdma_regbuf_is_mapped - check if buffer is DMA mapped
 * @rb: regbuf to check
 *
 * Returns true if the buffer is now mapped to rb->rg_device.
 */
static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

/**
 * rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is currently DMA mapped.
 */
static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
					  struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
}
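
/* Illustrative send-path sketch (hypothetical caller; the real callers
 * live in rpc_rdma.c): map lazily, and fail the request if mapping fails:
 *
 *	if (!rpcrdma_regbuf_dma_map(r_xprt, req->rl_sendbuf))
 *		return -EIO;	// caller handles the marshaling failure
 */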

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
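
/* Note: "writing" means the remote peer will write into this memory
 * (Write and Reply chunks), hence DMA_FROM_DEVICE; Read chunk memory
 * is read by the peer, hence DMA_TO_DEVICE.
 */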

/* Memory registration calls xprtrdma/frwr_ops.c
 */
void frwr_reset(struct rpcrdma_req *req);
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
void frwr_mr_release(struct rpcrdma_mr *mr);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
int frwr_wp_create(struct rpcrdma_xprt *r_xprt);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_noch_pullup,
	rpcrdma_noch_mapped,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req, u32 hdrlen,
			      struct xdr_buf *xdr,
			      enum rpcrdma_chunktype rtype);
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);

static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{
	xdr->head[0].iov_len = len;
	xdr->len = len;
}

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
extern unsigned int xprt_rdma_max_inline_write;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_close(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */