// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5
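/*
 * With a 4KB MPSMIN this advertises a maximum transfer of 2^8 * 4KB = 1MB,
 * and 2^5 * 4KB = 128KB when protection metadata (T10-PI) is in use.
 */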

struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
	struct nvmet_rdma_srq *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct ib_cqe write_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct ib_qp *qp;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq *nsrq;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int comp_vector;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id *cm_id;
	struct delayed_work repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct nvmet_rdma_srq **srqs;
	int srq_count;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
	int inline_data_size;
	int inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

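/* Number of pages needed to cover len bytes of inline data (len must be > 0). */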
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

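/*
 * Responses normally come from the queue's preallocated free_rsps pool; under
 * memory pressure we fall back to a dynamically allocated rsp, marked
 * ->allocated so that nvmet_rdma_put_rsp() frees it instead of recycling it.
 */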
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

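/*
 * Each receive buffer is laid out with sge[0] covering the 64-byte NVMe
 * command capsule and, for I/O queues, sge[1..inline_page_count] covering the
 * preallocated in-capsule (inline) data pages.
 */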
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	if (ib_dma_pci_p2p_dma_supported(ndev->device))
		r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	/* Data Out / RDMA WRITE */
	r->write_cqe.done = nvmet_rdma_write_data_done;

	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

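/*
 * Commands that could not get enough send WRs are parked on rsp_wr_wait_list;
 * retry them in order once previously posted work requests have completed.
 */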
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

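/* Translate a signature MR check failure into the matching NVMe PI status. */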
static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;
	u16 status = 0;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return NVME_SC_INVALID_PI;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}

	return status;
}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
				     struct ib_sig_attrs *sig_attrs)
{
	struct nvme_command *cmd = req->cmd;
	u16 control = le16_to_cpu(cmd->rw.control);
	u8 pi_type = req->ns->pi_type;
	struct blk_integrity *bi;

	bi = bdev_get_integrity(req->ns->bdev);

	memset(sig_attrs, 0, sizeof(*sig_attrs));

	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no wire domain */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
		/* PI is added by the HW */
		req->transfer_len += req->metadata_len;
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					  pi_type);
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
	}

	if (control & NVME_RW_PRINFO_PRCHK_REF)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	if (control & NVME_RW_PRINFO_PRCHK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (control & NVME_RW_PRINFO_PRCHK_APP)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
}

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
				  struct ib_sig_attrs *sig_attrs)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;
	int ret;

	if (req->metadata_len)
		ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
			addr, key, nvmet_data_dir(req));
	else
		ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
				       req->sg, req->sg_cnt, 0, addr, key,
				       nvmet_data_dir(req));

	return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;

	if (req->metadata_len)
		rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt,
			nvmet_data_dir(req));
	else
		rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
				    req->sg, req->sg_cnt, nvmet_data_dir(req));
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

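/*
 * Queue the NVMe response: for data-out commands the RDMA WRITE WRs are
 * chained in front of the SEND (or, with T10-PI, completed separately via
 * write_cqe before the SEND is posted from the write completion handler).
 * The receive buffer is reposted before the response goes out.
 */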
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u16 status;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
				ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	/*
	 * Upon RDMA completion check the signature status
	 * - if succeeded send good NVMe response
	 * - if failed send bad NVMe response with appropriate error
	 */
	status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	if (unlikely(status))
		rsp->req.cqe->status = cpu_to_le16(status << 1);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

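/*
 * Dispatch on the SGL descriptor in the command capsule: an offset (inline)
 * data block descriptor maps onto the preallocated in-capsule pages, while a
 * keyed descriptor sets up RDMA READ/WRITE contexts for the host buffer,
 * optionally remote-invalidating the rkey in the response.
 */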
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

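/*
 * Each command consumes one send WR for the response plus n_rdma WRs for its
 * RDMA READ/WRITE contexts; if the budget tracked in sq_wr_avail would go
 * negative, back off and let the caller park the command on the wait list.
 */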
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
			     false);
	ib_destroy_srq(nsrq->srq);

	kfree(nsrq);
}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{
	int i;

	if (!ndev->srqs)
		return;

	for (i = 0; i < ndev->srq_count; i++)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);

	kfree(ndev->srqs);
}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	size_t srq_size = ndev->srq_size;
	struct nvmet_rdma_srq *nsrq;
	struct ib_srq *srq;
	int ret, i;

	nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
	if (!nsrq)
		return ERR_PTR(-ENOMEM);

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto out_free;
	}

	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(nsrq->cmds)) {
		ret = PTR_ERR(nsrq->cmds);
		goto out_destroy_srq;
	}

	nsrq->srq = srq;
	nsrq->ndev = ndev;

	for (i = 0; i < srq_size; i++) {
		nsrq->cmds[i].nsrq = nsrq;
		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return nsrq;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
out_free:
	kfree(nsrq);
	return ERR_PTR(ret);
}

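/*
 * When use_srq is enabled, create one SRQ per completion vector (capped by the
 * device's max_srq) so that receive buffers are shared by the queues that map
 * to the same vector.
 */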
static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{
	int i, ret;

	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
			     nvmet_rdma_srq_size);
	ndev->srq_count = min(ndev->device->num_comp_vectors,
			      ndev->device->attrs.max_srq);

	ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
	if (!ndev->srqs)
		return -ENOMEM;

	for (i = 0; i < ndev->srq_count; i++) {
		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
		if (IS_ERR(ndev->srqs[i])) {
			ret = PTR_ERR(ndev->srqs[i]);
			goto err_srq;
		}
	}

	return 0;

err_srq:
	while (--i >= 0)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);
	kfree(ndev->srqs);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srqs(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

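/*
 * Look up (or create) the per-ib_device state, keyed by node GUID and shared
 * via a kref between all ports and queues on that device. The port's inline
 * data size and T10-PI setting are clamped to what the device can support.
 */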
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;

	if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
				  IBK_INTEGRITY_HANDOVER)) {
		pr_warn("T10-PI is not supported by device %s. Disabling it\n",
			cm_id->device->name);
		nport->pi_enable = false;
	}

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srqs(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

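/*
 * Size the CQ for RECV + RDMA READ/WRITE + SEND completions (plus one spare
 * entry) and create the RC QP, attaching the shared receive queue when one is
 * in use; otherwise post this queue's own receive buffers here.
 */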
1259 | static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) |
1260 | { |
1261 | struct ib_qp_init_attr qp_attr = { }; |
1262 | struct nvmet_rdma_device *ndev = queue->dev; |
1263 | int nr_cqe, ret, i, factor; |
1264 | |
1265 | /* |
1266 | * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND. |
1267 | */ |
1268 | nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; |
1269 | |
1270 | queue->cq = ib_cq_pool_get(dev: ndev->device, nr_cqe: nr_cqe + 1, |
1271 | comp_vector_hint: queue->comp_vector, poll_ctx: IB_POLL_WORKQUEUE); |
1272 | if (IS_ERR(ptr: queue->cq)) { |
1273 | ret = PTR_ERR(ptr: queue->cq); |
1274 | pr_err("failed to create CQ cqe= %d ret= %d\n" , |
1275 | nr_cqe + 1, ret); |
1276 | goto out; |
1277 | } |
1278 | |
1279 | qp_attr.qp_context = queue; |
1280 | qp_attr.event_handler = nvmet_rdma_qp_event; |
1281 | qp_attr.send_cq = queue->cq; |
1282 | qp_attr.recv_cq = queue->cq; |
1283 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
1284 | qp_attr.qp_type = IB_QPT_RC; |
1285 | /* +1 for drain */ |
1286 | qp_attr.cap.max_send_wr = queue->send_queue_size + 1; |
1287 | factor = rdma_rw_mr_factor(device: ndev->device, port_num: queue->cm_id->port_num, |
1288 | maxpages: 1 << NVMET_RDMA_MAX_MDTS); |
1289 | qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; |
1290 | qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, |
1291 | ndev->device->attrs.max_send_sge); |
1292 | |
1293 | if (queue->nsrq) { |
1294 | qp_attr.srq = queue->nsrq->srq; |
1295 | } else { |
1296 | /* +1 for drain */ |
1297 | qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; |
1298 | qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count; |
1299 | } |
1300 | |
1301 | if (queue->port->pi_enable && queue->host_qid) |
1302 | qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; |
1303 | |
1304 | ret = rdma_create_qp(id: queue->cm_id, pd: ndev->pd, qp_init_attr: &qp_attr); |
1305 | if (ret) { |
1306 | pr_err("failed to create_qp ret= %d\n" , ret); |
1307 | goto err_destroy_cq; |
1308 | } |
1309 | queue->qp = queue->cm_id->qp; |
1310 | |
1311 | atomic_set(v: &queue->sq_wr_avail, i: qp_attr.cap.max_send_wr); |
1312 | |
1313 | pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n" , |
1314 | __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, |
1315 | qp_attr.cap.max_send_wr, queue->cm_id); |
1316 | |
1317 | if (!queue->nsrq) { |
1318 | for (i = 0; i < queue->recv_queue_size; i++) { |
1319 | queue->cmds[i].queue = queue; |
1320 | ret = nvmet_rdma_post_recv(ndev, cmd: &queue->cmds[i]); |
1321 | if (ret) |
1322 | goto err_destroy_qp; |
1323 | } |
1324 | } |
1325 | |
1326 | out: |
1327 | return ret; |
1328 | |
1329 | err_destroy_qp: |
1330 | rdma_destroy_qp(id: queue->cm_id); |
1331 | err_destroy_cq: |
1332 | ib_cq_pool_put(cq: queue->cq, nr_cqe: nr_cqe + 1); |
1333 | goto out; |
1334 | } |
1335 | |
1336 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) |
1337 | { |
1338 | ib_drain_qp(qp: queue->qp); |
1339 | if (queue->cm_id) |
1340 | rdma_destroy_id(id: queue->cm_id); |
1341 | ib_destroy_qp(qp: queue->qp); |
1342 | ib_cq_pool_put(cq: queue->cq, nr_cqe: queue->recv_queue_size + 2 * |
1343 | queue->send_queue_size + 1); |
1344 | } |
1345 | |
1346 | static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) |
1347 | { |
1348 | pr_debug("freeing queue %d\n" , queue->idx); |
1349 | |
1350 | nvmet_sq_destroy(sq: &queue->nvme_sq); |
1351 | |
1352 | nvmet_rdma_destroy_queue_ib(queue); |
1353 | if (!queue->nsrq) { |
1354 | nvmet_rdma_free_cmds(ndev: queue->dev, cmds: queue->cmds, |
1355 | nr_cmds: queue->recv_queue_size, |
1356 | admin: !queue->host_qid); |
1357 | } |
1358 | nvmet_rdma_free_rsps(queue); |
1359 | ida_free(&nvmet_rdma_queue_ida, id: queue->idx); |
1360 | kfree(objp: queue); |
1361 | } |
1362 | |
1363 | static void nvmet_rdma_release_queue_work(struct work_struct *w) |
1364 | { |
1365 | struct nvmet_rdma_queue *queue = |
1366 | container_of(w, struct nvmet_rdma_queue, release_work); |
1367 | struct nvmet_rdma_device *dev = queue->dev; |
1368 | |
1369 | nvmet_rdma_free_queue(queue); |
1370 | |
1371 | kref_put(kref: &dev->ref, release: nvmet_rdma_free_dev); |
1372 | } |
1373 | |
1374 | static int |
1375 | nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, |
1376 | struct nvmet_rdma_queue *queue) |
1377 | { |
1378 | struct nvme_rdma_cm_req *req; |
1379 | |
1380 | req = (struct nvme_rdma_cm_req *)conn->private_data; |
1381 | if (!req || conn->private_data_len == 0) |
1382 | return NVME_RDMA_CM_INVALID_LEN; |
1383 | |
1384 | if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0) |
1385 | return NVME_RDMA_CM_INVALID_RECFMT; |
1386 | |
1387 | queue->host_qid = le16_to_cpu(req->qid); |
1388 | |
1389 | /* |
1390 | * req->hsqsize corresponds to our recv queue size plus 1 |
1391 | * req->hrqsize corresponds to our send queue size |
1392 | */ |
1393 | queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; |
1394 | queue->send_queue_size = le16_to_cpu(req->hrqsize); |
1395 | |
1396 | if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) |
1397 | return NVME_RDMA_CM_INVALID_HSQSIZE; |
1398 | |
1399 | /* XXX: Should we enforce some kind of max for IO queues? */ |
1400 | |
1401 | return 0; |
1402 | } |
1403 | |
1404 | static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, |
1405 | enum nvme_rdma_cm_status status) |
1406 | { |
1407 | struct nvme_rdma_cm_rej rej; |
1408 | |
1409 | pr_debug("rejecting connect request: status %d (%s)\n" , |
1410 | status, nvme_rdma_cm_msg(status)); |
1411 | |
1412 | rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); |
1413 | rej.sts = cpu_to_le16(status); |
1414 | |
1415 | return rdma_reject(id: cm_id, private_data: (void *)&rej, private_data_len: sizeof(rej), |
1416 | reason: IB_CM_REJ_CONSUMER_DEFINED); |
1417 | } |
1418 | |
1419 | static struct nvmet_rdma_queue * |
1420 | nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, |
1421 | struct rdma_cm_id *cm_id, |
1422 | struct rdma_cm_event *event) |
1423 | { |
1424 | struct nvmet_rdma_port *port = cm_id->context; |
1425 | struct nvmet_rdma_queue *queue; |
1426 | int ret; |
1427 | |
1428 | queue = kzalloc(size: sizeof(*queue), GFP_KERNEL); |
1429 | if (!queue) { |
1430 | ret = NVME_RDMA_CM_NO_RSC; |
1431 | goto out_reject; |
1432 | } |
1433 | |
1434 | ret = nvmet_sq_init(sq: &queue->nvme_sq); |
1435 | if (ret) { |
1436 | ret = NVME_RDMA_CM_NO_RSC; |
1437 | goto out_free_queue; |
1438 | } |
1439 | |
1440 | ret = nvmet_rdma_parse_cm_connect_req(conn: &event->param.conn, queue); |
1441 | if (ret) |
1442 | goto out_destroy_sq; |
1443 | |
1444 | /* |
1445 | * Schedules the actual release because calling rdma_destroy_id from |
1446 | * inside a CM callback would trigger a deadlock. (great API design..) |
1447 | */ |
1448 | INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); |
1449 | queue->dev = ndev; |
1450 | queue->cm_id = cm_id; |
1451 | queue->port = port->nport; |
1452 | |
1453 | spin_lock_init(&queue->state_lock); |
1454 | queue->state = NVMET_RDMA_Q_CONNECTING; |
1455 | INIT_LIST_HEAD(list: &queue->rsp_wait_list); |
1456 | INIT_LIST_HEAD(list: &queue->rsp_wr_wait_list); |
1457 | spin_lock_init(&queue->rsp_wr_wait_lock); |
1458 | INIT_LIST_HEAD(list: &queue->free_rsps); |
1459 | spin_lock_init(&queue->rsps_lock); |
1460 | INIT_LIST_HEAD(list: &queue->queue_list); |
1461 | |
1462 | queue->idx = ida_alloc(ida: &nvmet_rdma_queue_ida, GFP_KERNEL); |
1463 | if (queue->idx < 0) { |
1464 | ret = NVME_RDMA_CM_NO_RSC; |
1465 | goto out_destroy_sq; |
1466 | } |
1467 | |
1468 | /* |
1469 | * Spread the io queues across completion vectors, |
1470 | * but still keep all admin queues on vector 0. |
1471 | */ |
1472 | queue->comp_vector = !queue->host_qid ? 0 : |
1473 | queue->idx % ndev->device->num_comp_vectors; |
1474 | |
1475 | |
1476 | ret = nvmet_rdma_alloc_rsps(queue); |
1477 | if (ret) { |
1478 | ret = NVME_RDMA_CM_NO_RSC; |
1479 | goto out_ida_remove; |
1480 | } |
1481 | |
1482 | if (ndev->srqs) { |
1483 | queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; |
1484 | } else { |
1485 | queue->cmds = nvmet_rdma_alloc_cmds(ndev, |
1486 | nr_cmds: queue->recv_queue_size, |
1487 | admin: !queue->host_qid); |
1488 | if (IS_ERR(ptr: queue->cmds)) { |
1489 | ret = NVME_RDMA_CM_NO_RSC; |
1490 | goto out_free_responses; |
1491 | } |
1492 | } |
1493 | |
1494 | ret = nvmet_rdma_create_queue_ib(queue); |
1495 | if (ret) { |
1496 | pr_err("%s: creating RDMA queue failed (%d).\n" , |
1497 | __func__, ret); |
1498 | ret = NVME_RDMA_CM_NO_RSC; |
1499 | goto out_free_cmds; |
1500 | } |
1501 | |
1502 | return queue; |
1503 | |
1504 | out_free_cmds: |
1505 | if (!queue->nsrq) { |
1506 | nvmet_rdma_free_cmds(ndev: queue->dev, cmds: queue->cmds, |
1507 | nr_cmds: queue->recv_queue_size, |
1508 | admin: !queue->host_qid); |
1509 | } |
1510 | out_free_responses: |
1511 | nvmet_rdma_free_rsps(queue); |
1512 | out_ida_remove: |
1513 | ida_free(&nvmet_rdma_queue_ida, id: queue->idx); |
1514 | out_destroy_sq: |
1515 | nvmet_sq_destroy(sq: &queue->nvme_sq); |
1516 | out_free_queue: |
1517 | kfree(objp: queue); |
1518 | out_reject: |
1519 | nvmet_rdma_cm_reject(cm_id, status: ret); |
1520 | return NULL; |
1521 | } |
1522 | |
1523 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) |
1524 | { |
1525 | struct nvmet_rdma_queue *queue = priv; |
1526 | |
1527 | switch (event->event) { |
1528 | case IB_EVENT_COMM_EST: |
1529 | rdma_notify(id: queue->cm_id, event: event->event); |
1530 | break; |
1531 | case IB_EVENT_QP_LAST_WQE_REACHED: |
1532 | pr_debug("received last WQE reached event for queue=0x%p\n" , |
1533 | queue); |
1534 | break; |
1535 | default: |
1536 | pr_err("received IB QP event: %s (%d)\n" , |
1537 | ib_event_msg(event->event), event->event); |
1538 | break; |
1539 | } |
1540 | } |
1541 | |
1542 | static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, |
1543 | struct nvmet_rdma_queue *queue, |
1544 | struct rdma_conn_param *p) |
1545 | { |
1546 | struct rdma_conn_param param = { }; |
1547 | struct nvme_rdma_cm_rep priv = { }; |
1548 | int ret = -ENOMEM; |
1549 | |
1550 | param.rnr_retry_count = 7; |
1551 | param.flow_control = 1; |
1552 | param.initiator_depth = min_t(u8, p->initiator_depth, |
1553 | queue->dev->device->attrs.max_qp_init_rd_atom); |
1554 | param.private_data = &priv; |
1555 | param.private_data_len = sizeof(priv); |
1556 | priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); |
1557 | priv.crqsize = cpu_to_le16(queue->recv_queue_size); |
1558 | |
1559 | ret = rdma_accept(id: cm_id, conn_param: ¶m); |
1560 | if (ret) |
1561 | pr_err("rdma_accept failed (error code = %d)\n" , ret); |
1562 | |
1563 | return ret; |
1564 | } |
1565 | |
1566 | static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, |
1567 | struct rdma_cm_event *event) |
1568 | { |
1569 | struct nvmet_rdma_device *ndev; |
1570 | struct nvmet_rdma_queue *queue; |
1571 | int ret = -EINVAL; |
1572 | |
1573 | ndev = nvmet_rdma_find_get_device(cm_id); |
1574 | if (!ndev) { |
1575 | nvmet_rdma_cm_reject(cm_id, status: NVME_RDMA_CM_NO_RSC); |
1576 | return -ECONNREFUSED; |
1577 | } |
1578 | |
1579 | queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); |
1580 | if (!queue) { |
1581 | ret = -ENOMEM; |
1582 | goto put_device; |
1583 | } |
1584 | |
1585 | if (queue->host_qid == 0) { |
1586 | /* Let inflight controller teardown complete */ |
1587 | flush_workqueue(nvmet_wq); |
1588 | } |
1589 | |
1590 | ret = nvmet_rdma_cm_accept(cm_id, queue, p: &event->param.conn); |
1591 | if (ret) { |
1592 | /* |
1593 | * Don't destroy the cm_id in free path, as we implicitly |
1594 | * destroy the cm_id here with non-zero ret code. |
1595 | */ |
1596 | queue->cm_id = NULL; |
1597 | goto free_queue; |
1598 | } |
1599 | |
1600 | mutex_lock(&nvmet_rdma_queue_mutex); |
1601 | list_add_tail(new: &queue->queue_list, head: &nvmet_rdma_queue_list); |
1602 | mutex_unlock(lock: &nvmet_rdma_queue_mutex); |
1603 | |
1604 | return 0; |
1605 | |
1606 | free_queue: |
1607 | nvmet_rdma_free_queue(queue); |
1608 | put_device: |
1609 | kref_put(kref: &ndev->ref, release: nvmet_rdma_free_dev); |
1610 | |
1611 | return ret; |
1612 | } |
1613 | |
1614 | static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) |
1615 | { |
1616 | unsigned long flags; |
1617 | |
1618 | spin_lock_irqsave(&queue->state_lock, flags); |
1619 | if (queue->state != NVMET_RDMA_Q_CONNECTING) { |
1620 | pr_warn("trying to establish a connected queue\n" ); |
1621 | goto out_unlock; |
1622 | } |
1623 | queue->state = NVMET_RDMA_Q_LIVE; |
1624 | |
1625 | while (!list_empty(head: &queue->rsp_wait_list)) { |
1626 | struct nvmet_rdma_rsp *cmd; |
1627 | |
1628 | cmd = list_first_entry(&queue->rsp_wait_list, |
1629 | struct nvmet_rdma_rsp, wait_list); |
1630 | list_del(entry: &cmd->wait_list); |
1631 | |
1632 | spin_unlock_irqrestore(lock: &queue->state_lock, flags); |
1633 | nvmet_rdma_handle_command(queue, cmd); |
1634 | spin_lock_irqsave(&queue->state_lock, flags); |
1635 | } |
1636 | |
1637 | out_unlock: |
1638 | spin_unlock_irqrestore(lock: &queue->state_lock, flags); |
1639 | } |
1640 | |
1641 | static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) |
1642 | { |
1643 | bool disconnect = false; |
1644 | unsigned long flags; |
1645 | |
1646 | pr_debug("cm_id= %p queue->state= %d\n" , queue->cm_id, queue->state); |
1647 | |
1648 | spin_lock_irqsave(&queue->state_lock, flags); |
1649 | switch (queue->state) { |
1650 | case NVMET_RDMA_Q_CONNECTING: |
1651 | while (!list_empty(head: &queue->rsp_wait_list)) { |
1652 | struct nvmet_rdma_rsp *rsp; |
1653 | |
1654 | rsp = list_first_entry(&queue->rsp_wait_list, |
1655 | struct nvmet_rdma_rsp, |
1656 | wait_list); |
1657 | list_del(entry: &rsp->wait_list); |
1658 | nvmet_rdma_put_rsp(rsp); |
1659 | } |
1660 | fallthrough; |
1661 | case NVMET_RDMA_Q_LIVE: |
1662 | queue->state = NVMET_RDMA_Q_DISCONNECTING; |
1663 | disconnect = true; |
1664 | break; |
1665 | case NVMET_RDMA_Q_DISCONNECTING: |
1666 | break; |
1667 | } |
1668 | spin_unlock_irqrestore(lock: &queue->state_lock, flags); |
1669 | |
1670 | if (disconnect) { |
1671 | rdma_disconnect(id: queue->cm_id); |
1672 | queue_work(wq: nvmet_wq, work: &queue->release_work); |
1673 | } |
1674 | } |
1675 | |
1676 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) |
1677 | { |
1678 | bool disconnect = false; |
1679 | |
1680 | mutex_lock(&nvmet_rdma_queue_mutex); |
1681 | if (!list_empty(head: &queue->queue_list)) { |
1682 | list_del_init(entry: &queue->queue_list); |
1683 | disconnect = true; |
1684 | } |
1685 | mutex_unlock(lock: &nvmet_rdma_queue_mutex); |
1686 | |
1687 | if (disconnect) |
1688 | __nvmet_rdma_queue_disconnect(queue); |
1689 | } |
1690 | |
static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	queue_work(nvmet_wq, &queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id: rdma_cm id, used for nvmet port
 * @queue: nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device-bound listener cm_id (in which case
 * queue will be NULL).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroy
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->cm_id, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

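/*
 * Central RDMA CM event handler for both the listening port cm_id and
 * the per-queue cm_ids. A non-zero return value tells the RDMA CM core
 * to destroy the cm_id itself.
 */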
static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		 rdma_event_msg(event->event), event->event,
		 event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		if (!queue) {
			struct nvmet_rdma_port *port = cm_id->context;

			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
			break;
		}
		fallthrough;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

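/*
 * Disconnect every queue that belongs to @ctrl. The queue mutex is
 * dropped around each disconnect, so the list walk is restarted after
 * every removal.
 */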
static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

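/*
 * Disconnect every queue that was accepted on this port and is still
 * present on the global queue list.
 */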
static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_port *nport = port->nport;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->port != nport)
			continue;

		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

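/*
 * Stop listening: detach and destroy the listener cm_id (the xchg keeps
 * us from racing with a concurrent DEVICE_REMOVAL on the same cm_id),
 * then tear down the queues that were created through this port.
 */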
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);

	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet. Doing it here, after the RDMA-CM listener was
	 * destroyed, guarantees that no new queue will be created.
	 */
	nvmet_rdma_destroy_port_queues(port);
}

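/*
 * Create a listening cm_id for the port address, restrict it to its own
 * address family so IPv4 and IPv6 can share a port number, then bind it
 * and start listening for connect requests.
 */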
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
{
	struct sockaddr *addr = (struct sockaddr *)&port->addr;
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
		goto out_destroy_id;
	}

	port->cm_id = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

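/*
 * Rebuild the listener after an address change (or after a previous
 * repair attempt failed). If re-enabling fails, retry in 5 seconds.
 */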
static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
			struct nvmet_rdma_port, repair_work);
	int ret;

	nvmet_rdma_disable_port(port);
	ret = nvmet_rdma_enable_port(port);
	if (ret)
		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}

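/*
 * nvmet add_port callback: allocate the per-port context, validate the
 * address family and inline data size, parse the configured
 * traddr/trsvcid and start listening on it.
 */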
static int nvmet_rdma_add_port(struct nvmet_port *nport)
{
	struct nvmet_rdma_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	nport->priv = port;
	port->nport = nport;
	INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto out_free_port;
	}

	if (nport->inline_data_size < 0) {
		nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
	} else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
		pr_warn("inline_data_size %u is too large, reducing to %u\n",
			nport->inline_data_size,
			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto out_free_port;
	}

	ret = nvmet_rdma_enable_port(port);
	if (ret)
		goto out_free_port;

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(nport->disc_addr.portid),
		(struct sockaddr *)&port->addr);

	return 0;

out_free_port:
	kfree(port);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *nport)
{
	struct nvmet_rdma_port *port = nport->priv;

	cancel_delayed_work_sync(&port->repair_work);
	nvmet_rdma_disable_port(port);
	kfree(port);
}

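/*
 * Report the discovery traddr for this port. If the port is bound to a
 * wildcard address, report the source address of the cm_id the request
 * arrived on instead of the configured traddr.
 */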
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_rdma_port *port = nport->priv;
	struct rdma_cm_id *cm_id = port->cm_id;

	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
		struct nvmet_rdma_rsp *rsp =
			container_of(req, struct nvmet_rdma_rsp, req);
		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;

		sprintf(traddr, "%pISc", addr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
	if (ctrl->pi_support)
		return NVMET_RDMA_MAX_METADATA_MDTS;
	return NVMET_RDMA_MAX_MDTS;
}

static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
	return NVME_RDMA_MAX_QUEUE_SIZE;
}

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner = THIS_MODULE,
	.type = NVMF_TRTYPE_RDMA,
	.msdbd = 1,
	.flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
	.add_port = nvmet_rdma_add_port,
	.remove_port = nvmet_rdma_remove_port,
	.queue_response = nvmet_rdma_queue_response,
	.delete_ctrl = nvmet_rdma_delete_ctrl,
	.disc_traddr = nvmet_rdma_disc_port_addr,
	.get_mdts = nvmet_rdma_get_mdts,
	.get_max_queue_size = nvmet_rdma_get_max_queue_size,
};

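/*
 * ib_client remove callback: the underlying IB device is going away.
 * Disconnect every queue that uses it and flush nvmet_wq so all queue
 * release work finishes before device removal completes.
 */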
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/*
	 * IB Device that is used by nvmet controllers is being removed,
	 * delete all queues using this device.
	 */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_workqueue(nvmet_wq);
}

static struct ib_client nvmet_rdma_ib_client = {
	.name = "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */