// SPDX-License-Identifier: GPL-2.0-only
/*
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 */
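
/*
 * Illustrative usage (editorial note, not taken from this file): with a
 * 9P server that speaks RDMA listening on the default port, a client
 * mount might look like
 *
 *	mount -t 9p -o trans=rdma,port=5640 10.0.0.1 /mnt/9p
 *
 * where the server address and mountpoint are hypothetical. The
 * transport-specific options are parsed by parse_opts() below.
 */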

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */
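
/*
 * Editorial note, inferred from post_recv() below: every posted receive
 * buffer is client->msize bytes, and msize is capped by P9_RDMA_MAXSIZE,
 * so the receive side keeps up to rq_depth buffers of msize bytes each
 * DMA-mapped per mount.
 */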

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and teardown
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of msecs to wait for connection management events
 * @privport: Whether a privileged port may be used
 * @port: The port to use
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *	       See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	bool privport;
	u16 port;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
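
/*
 * Editorial note: the normal lifetime of @state, as driven by
 * p9_cm_event_handler() and the error paths below, is
 *
 *	INIT -> ADDR_RESOLVED -> ROUTE_RESOLVED -> CONNECTED
 *	     -> FLUSHING/CLOSING -> CLOSED
 *
 * FLUSHING is entered from recv_done() on a completion error, CLOSING
 * from the rdma_request() error path, and CLOSED on a CM disconnect.
 */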

struct p9_rdma_req;

/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @cqe: completion queue entry
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	struct ib_cqe cqe;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall rc;
	};
};

/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @privport: Whether a privileged port may be used
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	bool privport;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
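
/*
 * Editorial example: an option string such as
 *
 *	"port=5640,sq=16,rq=32,timeout=30000,privport"
 *
 * is split on ',' by parse_opts() below and matched against this table;
 * unrecognized tokens fall through to Opt_err and are silently ignored.
 */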

static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
	return 0;
}

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}

static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}

static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request. */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d\n", tag);
		goto err_out;
	}

	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		 req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}

static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc.size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	p9_req_put(client, c->req);
	kfree(c);
}
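
/*
 * Editorial note: sq_sem and rq_sem implement flow control against the
 * queue depths. rdma_request() takes sq_sem before ib_post_send() and
 * rq_sem before post_recv(); the completion handlers above release them,
 * so at most sq_depth sends and rq_depth receives are ever outstanding.
 */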

static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}

static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}

static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;
	int ret;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	ret = ib_post_recv(rdma->qp, &wr, NULL);
	if (ret)
		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
				    client->msize, DMA_FROM_DEVICE);
	return ret;

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}

static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here, see if we are this `next request' and need to absorb an
	 * excess rc. If yes, then drop and free our own, and do not
	 * post_recv().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one! */
			p9_fcall_fini(&req->rc);
			req->rc.sdata = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc.sdata = req->rc.sdata;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc.sdata = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc.sdata, c->req->tc.size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	c->cqe.done = send_done;

	sge.addr = c->busa;
	sge.length = c->req->tc.size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto dma_unmap;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	WRITE_ONCE(req->status, REQ_STATUS_SENT);
	err = ib_post_send(rdma->qp, &wr, NULL);
	if (err)
		goto dma_unmap;

	/* Success */
	return 0;

 dma_unmap:
	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
			    c->req->tc.size, DMA_TO_DEVICE);
	/* Handle errors that happened during or while preparing the send: */
 send_error:
	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* Ach.
	 * We did post_recv(), but not the send. We have one receive buffer
	 * posted in excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

	/* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}

static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);
	return 0;
}
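
/*
 * Editorial note on excess_rc accounting: the counter is raised here and
 * in the send_error path of rdma_request() whenever a receive buffer was
 * posted but no matching send went out; it is drained at the top of
 * rdma_request(), which then reuses the already-posted buffer instead of
 * calling post_recv() again.
 */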

static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
				   opts.sq_depth + opts.rq_depth + 1,
				   IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.pooled_rbuffers = true,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
	.show_options = p9_rdma_show_options,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);
MODULE_ALIAS_9P("rdma");

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");