1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2016 Avago Technologies. All rights reserved. |
4 | */ |
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
6 | #include <linux/module.h> |
7 | #include <linux/slab.h> |
8 | #include <linux/blk-mq.h> |
9 | #include <linux/parser.h> |
10 | #include <linux/random.h> |
11 | #include <uapi/scsi/fc/fc_fs.h> |
12 | #include <uapi/scsi/fc/fc_els.h> |
13 | |
14 | #include "nvmet.h" |
15 | #include <linux/nvme-fc-driver.h> |
16 | #include <linux/nvme-fc.h> |
17 | #include "../host/fc.h" |
18 | |
19 | |
20 | /* *************************** Data Structures/Defines ****************** */ |
21 | |
22 | |
23 | #define NVMET_LS_CTX_COUNT 256 |
24 | |
25 | struct nvmet_fc_tgtport; |
26 | struct nvmet_fc_tgt_assoc; |
27 | |
28 | struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ |
29 | struct nvmefc_ls_rsp *lsrsp; |
30 | struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ |
31 | |
32 | struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ |
33 | |
34 | struct nvmet_fc_tgtport *tgtport; |
35 | struct nvmet_fc_tgt_assoc *assoc; |
36 | void *hosthandle; |
37 | |
38 | union nvmefc_ls_requests *rqstbuf; |
39 | union nvmefc_ls_responses *rspbuf; |
40 | u16 rqstdatalen; |
41 | dma_addr_t rspdma; |
42 | |
43 | struct scatterlist sg[2]; |
44 | |
45 | struct work_struct work; |
46 | } __aligned(sizeof(unsigned long long)); |
47 | |
48 | struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ |
49 | struct nvmefc_ls_req ls_req; |
50 | |
51 | struct nvmet_fc_tgtport *tgtport; |
52 | void *hosthandle; |
53 | |
54 | int ls_error; |
55 | struct list_head lsreq_list; /* tgtport->ls_req_list */ |
56 | bool req_queued; |
57 | }; |
58 | |
59 | |
60 | /* desired maximum for a single sequence - if sg list allows it */ |
61 | #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) |
62 | |
63 | enum nvmet_fcp_datadir { |
64 | NVMET_FCP_NODATA, |
65 | NVMET_FCP_WRITE, |
66 | NVMET_FCP_READ, |
67 | NVMET_FCP_ABORTED, |
68 | }; |
69 | |
70 | struct nvmet_fc_fcp_iod { |
71 | struct nvmefc_tgt_fcp_req *fcpreq; |
72 | |
73 | struct nvme_fc_cmd_iu cmdiubuf; |
74 | struct nvme_fc_ersp_iu rspiubuf; |
75 | dma_addr_t rspdma; |
76 | struct scatterlist *next_sg; |
77 | struct scatterlist *data_sg; |
78 | int data_sg_cnt; |
79 | u32 offset; |
80 | enum nvmet_fcp_datadir io_dir; |
81 | bool active; |
82 | bool abort; |
83 | bool aborted; |
84 | bool writedataactive; |
85 | spinlock_t flock; |
86 | |
87 | struct nvmet_req req; |
88 | struct work_struct defer_work; |
89 | |
90 | struct nvmet_fc_tgtport *tgtport; |
91 | struct nvmet_fc_tgt_queue *queue; |
92 | |
93 | struct list_head fcp_list; /* tgtport->fcp_list */ |
94 | }; |
95 | |
96 | struct nvmet_fc_tgtport { |
97 | struct nvmet_fc_target_port fc_target_port; |
98 | |
99 | struct list_head tgt_list; /* nvmet_fc_target_list */ |
100 | struct device *dev; /* dev for dma mapping */ |
101 | struct nvmet_fc_target_template *ops; |
102 | |
103 | struct nvmet_fc_ls_iod *iod; |
104 | spinlock_t lock; |
105 | struct list_head ls_rcv_list; |
106 | struct list_head ls_req_list; |
107 | struct list_head ls_busylist; |
108 | struct list_head assoc_list; |
109 | struct list_head host_list; |
110 | struct ida assoc_cnt; |
111 | struct nvmet_fc_port_entry *pe; |
112 | struct kref ref; |
113 | u32 max_sg_cnt; |
114 | }; |
115 | |
116 | struct nvmet_fc_port_entry { |
117 | struct nvmet_fc_tgtport *tgtport; |
118 | struct nvmet_port *port; |
119 | u64 node_name; |
120 | u64 port_name; |
121 | struct list_head pe_list; |
122 | }; |
123 | |
124 | struct nvmet_fc_defer_fcp_req { |
125 | struct list_head req_list; |
126 | struct nvmefc_tgt_fcp_req *fcp_req; |
127 | }; |
128 | |
129 | struct nvmet_fc_tgt_queue { |
130 | bool ninetypercent; |
131 | u16 qid; |
132 | u16 sqsize; |
133 | u16 ersp_ratio; |
134 | __le16 sqhd; |
135 | atomic_t connected; |
136 | atomic_t sqtail; |
137 | atomic_t zrspcnt; |
138 | atomic_t rsn; |
139 | spinlock_t qlock; |
140 | struct nvmet_cq nvme_cq; |
141 | struct nvmet_sq nvme_sq; |
142 | struct nvmet_fc_tgt_assoc *assoc; |
143 | struct list_head fod_list; |
144 | struct list_head pending_cmd_list; |
145 | struct list_head avail_defer_list; |
146 | struct workqueue_struct *work_q; |
147 | struct kref ref; |
148 | struct rcu_head rcu; |
149 | /* array of fcp_iods */ |
150 | struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); |
151 | } __aligned(sizeof(unsigned long long)); |
152 | |
153 | struct nvmet_fc_hostport { |
154 | struct nvmet_fc_tgtport *tgtport; |
155 | void *hosthandle; |
156 | struct list_head host_list; |
157 | struct kref ref; |
158 | u8 invalid; |
159 | }; |
160 | |
161 | struct nvmet_fc_tgt_assoc { |
162 | u64 association_id; |
163 | u32 a_id; |
164 | atomic_t terminating; |
165 | struct nvmet_fc_tgtport *tgtport; |
166 | struct nvmet_fc_hostport *hostport; |
167 | struct nvmet_fc_ls_iod *rcv_disconn; |
168 | struct list_head a_list; |
169 | struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; |
170 | struct kref ref; |
171 | struct work_struct del_work; |
172 | struct rcu_head rcu; |
173 | }; |
174 | |
175 | |
176 | static inline int |
177 | nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) |
178 | { |
179 | return (iodptr - iodptr->tgtport->iod); |
180 | } |
181 | |
182 | static inline int |
183 | nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) |
184 | { |
185 | return (fodptr - fodptr->queue->fod); |
186 | } |
187 | |
188 | |
189 | /* |
190 | * Association and Connection IDs: |
191 | * |
 * The Association ID carries a random number in the upper 6 bytes and
 * zero in the lower 2 bytes.
 *
 * A Connection ID is the Association ID with the QID OR'd into the
 * lower 2 bytes.
196 | * |
197 | * note: Association ID = Connection ID for queue 0 |
198 | */ |
199 | #define BYTES_FOR_QID sizeof(u16) |
200 | #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) |
201 | #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) |
202 | |
203 | static inline u64 |
204 | nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) |
205 | { |
206 | return (assoc->association_id | qid); |
207 | } |
208 | |
209 | static inline u64 |
210 | nvmet_fc_getassociationid(u64 connectionid) |
211 | { |
212 | return connectionid & ~NVMET_FC_QUEUEID_MASK; |
213 | } |
214 | |
215 | static inline u16 |
216 | nvmet_fc_getqueueid(u64 connectionid) |
217 | { |
218 | return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); |
219 | } |
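
/*
 * Worked example of the ID encoding above (illustrative, not driver
 * code): if the random association_id is 0x0123456789ab0000, then for
 * qid 3:
 *
 *	nvmet_fc_makeconnid(assoc, 3)	  == 0x0123456789ab0003
 *	nvmet_fc_getassociationid(connid) == 0x0123456789ab0000
 *	nvmet_fc_getqueueid(connid)	  == 3
 *
 * and, per the note above, nvmet_fc_makeconnid(assoc, 0) is the
 * association id itself.
 */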
220 | |
221 | static inline struct nvmet_fc_tgtport * |
222 | targetport_to_tgtport(struct nvmet_fc_target_port *targetport) |
223 | { |
224 | return container_of(targetport, struct nvmet_fc_tgtport, |
225 | fc_target_port); |
226 | } |
227 | |
228 | static inline struct nvmet_fc_fcp_iod * |
229 | nvmet_req_to_fod(struct nvmet_req *nvme_req) |
230 | { |
231 | return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); |
232 | } |
233 | |
234 | |
235 | /* *************************** Globals **************************** */ |
236 | |
237 | |
238 | static DEFINE_SPINLOCK(nvmet_fc_tgtlock); |
239 | |
240 | static LIST_HEAD(nvmet_fc_target_list); |
241 | static DEFINE_IDA(nvmet_fc_tgtport_cnt); |
242 | static LIST_HEAD(nvmet_fc_portentry_list); |
243 | |
244 | |
245 | static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); |
246 | static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); |
247 | static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); |
248 | static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); |
249 | static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); |
250 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); |
251 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); |
252 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); |
253 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
254 | struct nvmet_fc_fcp_iod *fod); |
255 | static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); |
256 | static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
257 | struct nvmet_fc_ls_iod *iod); |
258 | |
259 | |
260 | /* *********************** FC-NVME DMA Handling **************************** */ |
261 | |
262 | /* |
263 | * The fcloop device passes in a NULL device pointer. Real LLD's will |
264 | * pass in a valid device pointer. If NULL is passed to the dma mapping |
265 | * routines, depending on the platform, it may or may not succeed, and |
266 | * may crash. |
267 | * |
268 | * As such: |
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
273 | * |
274 | * On more complex mappings (dma_map_sg), a pseudo routine fills |
275 | * in the scatter list, setting all dma addresses to 0. |
276 | */ |
277 | |
278 | static inline dma_addr_t |
279 | fc_dma_map_single(struct device *dev, void *ptr, size_t size, |
280 | enum dma_data_direction dir) |
281 | { |
282 | return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; |
283 | } |
284 | |
285 | static inline int |
286 | fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
287 | { |
288 | return dev ? dma_mapping_error(dev, dma_addr) : 0; |
289 | } |
290 | |
291 | static inline void |
292 | fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
293 | enum dma_data_direction dir) |
294 | { |
295 | if (dev) |
296 | dma_unmap_single(dev, addr, size, dir); |
297 | } |
298 | |
299 | static inline void |
300 | fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, |
301 | enum dma_data_direction dir) |
302 | { |
303 | if (dev) |
304 | dma_sync_single_for_cpu(dev, addr, size, dir); |
305 | } |
306 | |
307 | static inline void |
308 | fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, |
309 | enum dma_data_direction dir) |
310 | { |
311 | if (dev) |
312 | dma_sync_single_for_device(dev, addr, size, dir); |
313 | } |
314 | |
315 | /* pseudo dma_map_sg call */ |
316 | static int |
317 | fc_map_sg(struct scatterlist *sg, int nents) |
318 | { |
319 | struct scatterlist *s; |
320 | int i; |
321 | |
322 | WARN_ON(nents == 0 || sg[0].length == 0); |
323 | |
324 | for_each_sg(sg, s, nents, i) { |
325 | s->dma_address = 0L; |
326 | #ifdef CONFIG_NEED_SG_DMA_LENGTH |
327 | s->dma_length = s->length; |
328 | #endif |
329 | } |
330 | return nents; |
331 | } |
332 | |
333 | static inline int |
334 | fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
335 | enum dma_data_direction dir) |
336 | { |
337 | return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); |
338 | } |
339 | |
340 | static inline void |
341 | fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
342 | enum dma_data_direction dir) |
343 | { |
344 | if (dev) |
345 | dma_unmap_sg(dev, sg, nents, dir); |
346 | } |
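
/*
 * Illustrative sketch of the NULL-dev (fcloop-style) path through the
 * wrappers above; "buf" and "len" are hypothetical:
 *
 *	dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// a == 0, and fc_dma_mapping_error(NULL, a) == 0 (no error)
 *	fc_dma_unmap_single(NULL, a, len, DMA_TO_DEVICE);  // no-op
 *
 * With a real LLDD device pointer, the same calls forward directly to
 * the dma_map_single()/dma_unmap_single() DMA API.
 */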
347 | |
348 | |
349 | /* ********************** FC-NVME LS XMT Handling ************************* */ |
350 | |
351 | |
352 | static void |
353 | __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) |
354 | { |
355 | struct nvmet_fc_tgtport *tgtport = lsop->tgtport; |
356 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
357 | unsigned long flags; |
358 | |
359 | spin_lock_irqsave(&tgtport->lock, flags); |
360 | |
361 | if (!lsop->req_queued) { |
		spin_unlock_irqrestore(&tgtport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
375 | |
376 | nvmet_fc_tgtport_put(tgtport); |
377 | } |
378 | |
379 | static int |
380 | __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, |
381 | struct nvmet_fc_ls_req_op *lsop, |
382 | void (*done)(struct nvmefc_ls_req *req, int status)) |
383 | { |
384 | struct nvmefc_ls_req *lsreq = &lsop->ls_req; |
385 | unsigned long flags; |
386 | int ret = 0; |
387 | |
388 | if (!tgtport->ops->ls_req) |
389 | return -EOPNOTSUPP; |
390 | |
391 | if (!nvmet_fc_tgtport_get(tgtport)) |
392 | return -ESHUTDOWN; |
393 | |
394 | lsreq->done = done; |
395 | lsop->req_queued = false; |
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
402 | ret = -EFAULT; |
403 | goto out_puttgtport; |
404 | } |
405 | lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; |
406 | |
407 | spin_lock_irqsave(&tgtport->lock, flags); |
408 | |
	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);
414 | |
415 | ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, |
416 | lsreq); |
417 | if (ret) |
418 | goto out_unlink; |
419 | |
420 | return 0; |
421 | |
422 | out_unlink: |
423 | lsop->ls_error = ret; |
424 | spin_lock_irqsave(&tgtport->lock, flags); |
425 | lsop->req_queued = false; |
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
431 | out_puttgtport: |
432 | nvmet_fc_tgtport_put(tgtport); |
433 | |
434 | return ret; |
435 | } |
436 | |
437 | static int |
438 | nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, |
439 | struct nvmet_fc_ls_req_op *lsop, |
440 | void (*done)(struct nvmefc_ls_req *req, int status)) |
441 | { |
442 | /* don't wait for completion */ |
443 | |
444 | return __nvmet_fc_send_ls_req(tgtport, lsop, done); |
445 | } |
446 | |
447 | static void |
448 | nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) |
449 | { |
450 | struct nvmet_fc_ls_req_op *lsop = |
451 | container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); |
452 | |
453 | __nvmet_fc_finish_ls_req(lsop); |
454 | |
455 | /* fc-nvme target doesn't care about success or failure of cmd */ |
456 | |
	kfree(lsop);
458 | } |
459 | |
460 | /* |
461 | * This routine sends a FC-NVME LS to disconnect (aka terminate) |
462 | * the FC-NVME Association. Terminating the association also |
463 | * terminates the FC-NVME connections (per queue, both admin and io |
464 | * queues) that are part of the association. E.g. things are torn |
465 | * down, and the related FC-NVME Association ID and Connection IDs |
466 | * become invalid. |
467 | * |
 * The behavior of the fc-nvme target is such that its
469 | * understanding of the association and connections will implicitly |
470 | * be torn down. The action is implicit as it may be due to a loss of |
471 | * connectivity with the fc-nvme host, so the target may never get a |
472 | * response even if it tried. As such, the action of this routine |
473 | * is to asynchronously send the LS, ignore any results of the LS, and |
474 | * continue on with terminating the association. If the fc-nvme host |
475 | * is present and receives the LS, it too can tear down. |
476 | */ |
477 | static void |
478 | nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) |
479 | { |
480 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
481 | struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; |
482 | struct fcnvme_ls_disconnect_assoc_acc *discon_acc; |
483 | struct nvmet_fc_ls_req_op *lsop; |
484 | struct nvmefc_ls_req *lsreq; |
485 | int ret; |
486 | |
487 | /* |
488 | * If ls_req is NULL or no hosthandle, it's an older lldd and no |
489 | * message is normal. Otherwise, send unless the hostport has |
490 | * already been invalidated by the lldd. |
491 | */ |
492 | if (!tgtport->ops->ls_req || !assoc->hostport || |
493 | assoc->hostport->invalid) |
494 | return; |
495 | |
	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}
505 | |
506 | discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; |
507 | discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; |
508 | lsreq = &lsop->ls_req; |
509 | if (tgtport->ops->lsrqst_priv_sz) |
510 | lsreq->private = (void *)&discon_acc[1]; |
511 | else |
512 | lsreq->private = NULL; |
513 | |
514 | lsop->tgtport = tgtport; |
515 | lsop->hosthandle = assoc->hostport->hosthandle; |
516 | |
	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
527 | } |
528 | } |
529 | |
530 | |
531 | /* *********************** FC-NVME Port Management ************************ */ |
532 | |
533 | |
534 | static int |
535 | nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) |
536 | { |
537 | struct nvmet_fc_ls_iod *iod; |
538 | int i; |
539 | |
	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
541 | GFP_KERNEL); |
542 | if (!iod) |
543 | return -ENOMEM; |
544 | |
545 | tgtport->iod = iod; |
546 | |
547 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
548 | INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); |
549 | iod->tgtport = tgtport; |
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
553 | sizeof(union nvmefc_ls_responses), |
554 | GFP_KERNEL); |
555 | if (!iod->rqstbuf) |
556 | goto out_fail; |
557 | |
558 | iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; |
559 | |
		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
564 | goto out_fail; |
565 | } |
566 | |
567 | return 0; |
568 | |
569 | out_fail: |
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
577 | } |
578 | |
	kfree(iod);
580 | |
581 | return -EFAULT; |
582 | } |
583 | |
584 | static void |
585 | nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) |
586 | { |
587 | struct nvmet_fc_ls_iod *iod = tgtport->iod; |
588 | int i; |
589 | |
590 | for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { |
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
596 | } |
	kfree(tgtport->iod);
598 | } |
599 | |
600 | static struct nvmet_fc_ls_iod * |
601 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) |
602 | { |
603 | struct nvmet_fc_ls_iod *iod; |
604 | unsigned long flags; |
605 | |
606 | spin_lock_irqsave(&tgtport->lock, flags); |
607 | iod = list_first_entry_or_null(&tgtport->ls_rcv_list, |
608 | struct nvmet_fc_ls_iod, ls_rcv_list); |
609 | if (iod) |
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
612 | return iod; |
613 | } |
614 | |
615 | |
616 | static void |
617 | nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, |
618 | struct nvmet_fc_ls_iod *iod) |
619 | { |
620 | unsigned long flags; |
621 | |
622 | spin_lock_irqsave(&tgtport->lock, flags); |
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
625 | } |
626 | |
627 | static void |
628 | nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, |
629 | struct nvmet_fc_tgt_queue *queue) |
630 | { |
631 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
632 | int i; |
633 | |
634 | for (i = 0; i < queue->sqsize; fod++, i++) { |
635 | INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); |
636 | fod->tgtport = tgtport; |
637 | fod->queue = queue; |
638 | fod->active = false; |
639 | fod->abort = false; |
640 | fod->aborted = false; |
641 | fod->fcpreq = NULL; |
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
655 | } |
656 | |
657 | return; |
658 | } |
659 | } |
660 | } |
661 | |
662 | static void |
663 | nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, |
664 | struct nvmet_fc_tgt_queue *queue) |
665 | { |
666 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
667 | int i; |
668 | |
669 | for (i = 0; i < queue->sqsize; fod++, i++) { |
670 | if (fod->rspdma) |
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
673 | } |
674 | } |
675 | |
676 | static struct nvmet_fc_fcp_iod * |
677 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) |
678 | { |
679 | struct nvmet_fc_fcp_iod *fod; |
680 | |
681 | lockdep_assert_held(&queue->qlock); |
682 | |
683 | fod = list_first_entry_or_null(&queue->fod_list, |
684 | struct nvmet_fc_fcp_iod, fcp_list); |
685 | if (fod) { |
		list_del(&fod->fcp_list);
687 | fod->active = true; |
688 | /* |
689 | * no queue reference is taken, as it was taken by the |
690 | * queue lookup just prior to the allocation. The iod |
691 | * will "inherit" that reference. |
692 | */ |
693 | } |
694 | return fod; |
695 | } |
696 | |
697 | |
698 | static void |
699 | nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, |
700 | struct nvmet_fc_tgt_queue *queue, |
701 | struct nvmefc_tgt_fcp_req *fcpreq) |
702 | { |
703 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
704 | |
705 | /* |
706 | * put all admin cmds on hw queue id 0. All io commands go to |
707 | * the respective hw queue based on a modulo basis |
708 | */ |
709 | fcpreq->hwqid = queue->qid ? |
710 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; |
711 | |
712 | nvmet_fc_handle_fcp_rqst(tgtport, fod); |
713 | } |
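
/*
 * Example of the hwqid mapping above (illustrative): with a
 * hypothetical LLDD reporting max_hw_queues = 4,
 *
 *	qid 0 (admin)	-> hwqid 0
 *	qid 1..4	-> hwqid 0..3
 *	qid 5		-> hwqid 0	(wraps via the modulo)
 */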
714 | |
715 | static void |
716 | nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) |
717 | { |
718 | struct nvmet_fc_fcp_iod *fod = |
719 | container_of(work, struct nvmet_fc_fcp_iod, defer_work); |
720 | |
721 | /* Submit deferred IO for processing */ |
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
723 | |
724 | } |
725 | |
726 | static void |
727 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
728 | struct nvmet_fc_fcp_iod *fod) |
729 | { |
730 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
731 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
732 | struct nvmet_fc_defer_fcp_req *deferfcp; |
733 | unsigned long flags; |
734 | |
	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
737 | |
738 | fcpreq->nvmet_fc_private = NULL; |
739 | |
740 | fod->active = false; |
741 | fod->abort = false; |
742 | fod->aborted = false; |
743 | fod->writedataactive = false; |
744 | fod->fcpreq = NULL; |
745 | |
746 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); |
747 | |
748 | /* release the queue lookup reference on the completed IO */ |
749 | nvmet_fc_tgt_q_put(queue); |
750 | |
751 | spin_lock_irqsave(&queue->qlock, flags); |
752 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, |
753 | struct nvmet_fc_defer_fcp_req, req_list); |
754 | if (!deferfcp) { |
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
757 | return; |
758 | } |
759 | |
760 | /* Re-use the fod for the next pending cmd that was deferred */ |
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);
769 | |
770 | /* Save NVME CMD IO in fod */ |
771 | memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); |
772 | |
773 | /* Setup new fcpreq to be processed */ |
774 | fcpreq->rspaddr = NULL; |
775 | fcpreq->rsplen = 0; |
776 | fcpreq->nvmet_fc_private = fod; |
777 | fod->fcpreq = fcpreq; |
778 | fod->active = true; |
779 | |
780 | /* inform LLDD IO is now being processed */ |
781 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); |
782 | |
783 | /* |
784 | * Leave the queue lookup get reference taken when |
785 | * fod was originally allocated. |
786 | */ |
787 | |
	queue_work(queue->work_q, &fod->defer_work);
789 | } |
790 | |
791 | static struct nvmet_fc_tgt_queue * |
792 | nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, |
793 | u16 qid, u16 sqsize) |
794 | { |
795 | struct nvmet_fc_tgt_queue *queue; |
796 | int ret; |
797 | |
798 | if (qid > NVMET_NR_QUEUES) |
799 | return NULL; |
800 | |
801 | queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); |
802 | if (!queue) |
803 | return NULL; |
804 | |
805 | if (!nvmet_fc_tgt_a_get(assoc)) |
806 | goto out_free_queue; |
807 | |
	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
811 | if (!queue->work_q) |
812 | goto out_a_put; |
813 | |
814 | queue->qid = qid; |
815 | queue->sqsize = sqsize; |
816 | queue->assoc = assoc; |
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);
826 | |
	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
830 | if (ret) |
831 | goto out_fail_iodlist; |
832 | |
833 | WARN_ON(assoc->queues[qid]); |
834 | rcu_assign_pointer(assoc->queues[qid], queue); |
835 | |
836 | return queue; |
837 | |
838 | out_fail_iodlist: |
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
841 | out_a_put: |
842 | nvmet_fc_tgt_a_put(assoc); |
843 | out_free_queue: |
	kfree(queue);
845 | return NULL; |
846 | } |
847 | |
848 | |
849 | static void |
850 | nvmet_fc_tgt_queue_free(struct kref *ref) |
851 | { |
852 | struct nvmet_fc_tgt_queue *queue = |
853 | container_of(ref, struct nvmet_fc_tgt_queue, ref); |
854 | |
855 | rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); |
856 | |
	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);
862 | |
863 | kfree_rcu(queue, rcu); |
864 | } |
865 | |
866 | static void |
867 | nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) |
868 | { |
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
870 | } |
871 | |
872 | static int |
873 | nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) |
874 | { |
	return kref_get_unless_zero(&queue->ref);
876 | } |
877 | |
878 | |
879 | static void |
880 | nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) |
881 | { |
882 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; |
883 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
884 | struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; |
885 | unsigned long flags; |
886 | int i; |
887 | bool disconnect; |
888 | |
	disconnect = atomic_xchg(&queue->connected, 0);
890 | |
891 | /* if not connected, nothing to do */ |
892 | if (!disconnect) |
893 | return; |
894 | |
895 | spin_lock_irqsave(&queue->qlock, flags); |
896 | /* abort outstanding io's */ |
897 | for (i = 0; i < queue->sqsize; fod++, i++) { |
898 | if (fod->active) { |
			spin_lock(&fod->flock);
900 | fod->abort = true; |
901 | /* |
902 | * only call lldd abort routine if waiting for |
903 | * writedata. other outstanding ops should finish |
904 | * on their own. |
905 | */ |
906 | if (fod->writedataactive) { |
907 | fod->aborted = true; |
				spin_unlock(&fod->flock);
909 | tgtport->ops->fcp_abort( |
910 | &tgtport->fc_target_port, fod->fcpreq); |
911 | } else |
				spin_unlock(&fod->flock);
913 | } |
914 | } |
915 | |
916 | /* Cleanup defer'ed IOs in queue */ |
917 | list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, |
918 | req_list) { |
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
921 | } |
922 | |
923 | for (;;) { |
924 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, |
925 | struct nvmet_fc_defer_fcp_req, req_list); |
926 | if (!deferfcp) |
927 | break; |
928 | |
		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
931 | |
932 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, |
933 | deferfcp->fcp_req); |
934 | |
935 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, |
936 | deferfcp->fcp_req); |
937 | |
938 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, |
939 | deferfcp->fcp_req); |
940 | |
941 | /* release the queue lookup reference */ |
942 | nvmet_fc_tgt_q_put(queue); |
943 | |
		kfree(deferfcp);
945 | |
946 | spin_lock_irqsave(&queue->qlock, flags); |
947 | } |
	spin_unlock_irqrestore(&queue->qlock, flags);
949 | |
950 | flush_workqueue(queue->work_q); |
951 | |
	nvmet_sq_destroy(&queue->nvme_sq);
953 | |
954 | nvmet_fc_tgt_q_put(queue); |
955 | } |
956 | |
957 | static struct nvmet_fc_tgt_queue * |
958 | nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, |
959 | u64 connection_id) |
960 | { |
961 | struct nvmet_fc_tgt_assoc *assoc; |
962 | struct nvmet_fc_tgt_queue *queue; |
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
965 | |
966 | if (qid > NVMET_NR_QUEUES) |
967 | return NULL; |
968 | |
969 | rcu_read_lock(); |
970 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
971 | if (association_id == assoc->association_id) { |
972 | queue = rcu_dereference(assoc->queues[qid]); |
973 | if (queue && |
			    (!atomic_read(&queue->connected) ||
975 | !nvmet_fc_tgt_q_get(queue))) |
976 | queue = NULL; |
977 | rcu_read_unlock(); |
978 | return queue; |
979 | } |
980 | } |
981 | rcu_read_unlock(); |
982 | return NULL; |
983 | } |
984 | |
985 | static void |
986 | nvmet_fc_hostport_free(struct kref *ref) |
987 | { |
988 | struct nvmet_fc_hostport *hostport = |
989 | container_of(ref, struct nvmet_fc_hostport, ref); |
990 | struct nvmet_fc_tgtport *tgtport = hostport->tgtport; |
991 | unsigned long flags; |
992 | |
993 | spin_lock_irqsave(&tgtport->lock, flags); |
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
996 | if (tgtport->ops->host_release && hostport->invalid) |
997 | tgtport->ops->host_release(hostport->hosthandle); |
	kfree(hostport);
999 | nvmet_fc_tgtport_put(tgtport); |
1000 | } |
1001 | |
1002 | static void |
1003 | nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) |
1004 | { |
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
1006 | } |
1007 | |
1008 | static int |
1009 | nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) |
1010 | { |
	return kref_get_unless_zero(&hostport->ref);
1012 | } |
1013 | |
1014 | static void |
1015 | nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) |
1016 | { |
1017 | /* if LLDD not implemented, leave as NULL */ |
1018 | if (!hostport || !hostport->hosthandle) |
1019 | return; |
1020 | |
1021 | nvmet_fc_hostport_put(hostport); |
1022 | } |
1023 | |
1024 | static struct nvmet_fc_hostport * |
1025 | nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1026 | { |
1027 | struct nvmet_fc_hostport *host; |
1028 | |
1029 | lockdep_assert_held(&tgtport->lock); |
1030 | |
1031 | list_for_each_entry(host, &tgtport->host_list, host_list) { |
1032 | if (host->hosthandle == hosthandle && !host->invalid) { |
			if (nvmet_fc_hostport_get(host))
1034 | return (host); |
1035 | } |
1036 | } |
1037 | |
1038 | return NULL; |
1039 | } |
1040 | |
1041 | static struct nvmet_fc_hostport * |
1042 | nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1043 | { |
1044 | struct nvmet_fc_hostport *newhost, *match = NULL; |
1045 | unsigned long flags; |
1046 | |
1047 | /* if LLDD not implemented, leave as NULL */ |
1048 | if (!hosthandle) |
1049 | return NULL; |
1050 | |
1051 | /* |
1052 | * take reference for what will be the newly allocated hostport if |
1053 | * we end up using a new allocation |
1054 | */ |
1055 | if (!nvmet_fc_tgtport_get(tgtport)) |
		return ERR_PTR(-EINVAL);
1057 | |
1058 | spin_lock_irqsave(&tgtport->lock, flags); |
1059 | match = nvmet_fc_match_hostport(tgtport, hosthandle); |
	spin_unlock_irqrestore(&tgtport->lock, flags);
1061 | |
1062 | if (match) { |
1063 | /* no new allocation - release reference */ |
1064 | nvmet_fc_tgtport_put(tgtport); |
1065 | return match; |
1066 | } |
1067 | |
	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1069 | if (!newhost) { |
1070 | /* no new allocation - release reference */ |
1071 | nvmet_fc_tgtport_put(tgtport); |
		return ERR_PTR(-ENOMEM);
1073 | } |
1074 | |
1075 | spin_lock_irqsave(&tgtport->lock, flags); |
1076 | match = nvmet_fc_match_hostport(tgtport, hosthandle); |
1077 | if (match) { |
1078 | /* new allocation not needed */ |
		kfree(newhost);
1080 | newhost = match; |
1081 | /* no new allocation - release reference */ |
1082 | nvmet_fc_tgtport_put(tgtport); |
1083 | } else { |
1084 | newhost->tgtport = tgtport; |
1085 | newhost->hosthandle = hosthandle; |
		INIT_LIST_HEAD(&newhost->host_list);
		kref_init(&newhost->ref);

		list_add_tail(&newhost->host_list, &tgtport->host_list);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
1092 | |
1093 | return newhost; |
1094 | } |
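
/*
 * The allocation above follows the usual optimistic-allocation shape:
 * look up under the lock, drop the lock to kzalloc() (which may sleep),
 * then re-run the lookup under the lock before publishing. A minimal
 * sketch of the pattern, with hypothetical find()/publish() helpers:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	obj = find(key);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (!obj) {
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);
 *		spin_lock_irqsave(&lock, flags);
 *		obj = find(key);		// lost the race?
 *		if (obj)
 *			kfree(new);		// use the winner's object
 *		else
 *			publish(new);
 *		spin_unlock_irqrestore(&lock, flags);
 *	}
 */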
1095 | |
1096 | static void |
1097 | nvmet_fc_delete_assoc(struct work_struct *work) |
1098 | { |
1099 | struct nvmet_fc_tgt_assoc *assoc = |
1100 | container_of(work, struct nvmet_fc_tgt_assoc, del_work); |
1101 | |
1102 | nvmet_fc_delete_target_assoc(assoc); |
1103 | nvmet_fc_tgt_a_put(assoc); |
1104 | } |
1105 | |
1106 | static struct nvmet_fc_tgt_assoc * |
1107 | nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) |
1108 | { |
1109 | struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; |
1110 | unsigned long flags; |
1111 | u64 ran; |
1112 | int idx; |
1113 | bool needrandom = true; |
1114 | |
	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1116 | if (!assoc) |
1117 | return NULL; |
1118 | |
	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
1120 | if (idx < 0) |
1121 | goto out_free_assoc; |
1122 | |
1123 | if (!nvmet_fc_tgtport_get(tgtport)) |
1124 | goto out_ida; |
1125 | |
1126 | assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); |
	if (IS_ERR(assoc->hostport))
1128 | goto out_put; |
1129 | |
1130 | assoc->tgtport = tgtport; |
1131 | assoc->a_id = idx; |
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
	atomic_set(&assoc->terminating, 0);
1136 | |
1137 | while (needrandom) { |
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1139 | ran = ran << BYTES_FOR_QID_SHIFT; |
1140 | |
1141 | spin_lock_irqsave(&tgtport->lock, flags); |
1142 | needrandom = false; |
1143 | list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { |
1144 | if (ran == tmpassoc->association_id) { |
1145 | needrandom = true; |
1146 | break; |
1147 | } |
1148 | } |
1149 | if (!needrandom) { |
1150 | assoc->association_id = ran; |
			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1152 | } |
		spin_unlock_irqrestore(&tgtport->lock, flags);
1154 | } |
1155 | |
1156 | return assoc; |
1157 | |
1158 | out_put: |
1159 | nvmet_fc_tgtport_put(tgtport); |
1160 | out_ida: |
	ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
1164 | return NULL; |
1165 | } |
1166 | |
1167 | static void |
1168 | nvmet_fc_target_assoc_free(struct kref *ref) |
1169 | { |
1170 | struct nvmet_fc_tgt_assoc *assoc = |
1171 | container_of(ref, struct nvmet_fc_tgt_assoc, ref); |
1172 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
1173 | struct nvmet_fc_ls_iod *oldls; |
1174 | unsigned long flags; |
1175 | |
1176 | /* Send Disconnect now that all i/o has completed */ |
1177 | nvmet_fc_xmt_disconnect_assoc(assoc); |
1178 | |
	nvmet_fc_free_hostport(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
1184 | /* if pending Rcv Disconnect Association LS, send rsp now */ |
1185 | if (oldls) |
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_free(&tgtport->assoc_cnt, assoc->a_id);
	dev_info(tgtport->dev,
		"{%d:%d} Association freed\n",
1190 | tgtport->fc_target_port.port_num, assoc->a_id); |
1191 | kfree_rcu(assoc, rcu); |
1192 | nvmet_fc_tgtport_put(tgtport); |
1193 | } |
1194 | |
1195 | static void |
1196 | nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) |
1197 | { |
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1199 | } |
1200 | |
1201 | static int |
1202 | nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) |
1203 | { |
	return kref_get_unless_zero(&assoc->ref);
1205 | } |
1206 | |
1207 | static void |
1208 | nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) |
1209 | { |
1210 | struct nvmet_fc_tgtport *tgtport = assoc->tgtport; |
1211 | struct nvmet_fc_tgt_queue *queue; |
1212 | int i, terminating; |
1213 | |
	terminating = atomic_xchg(&assoc->terminating, 1);
1215 | |
1216 | /* if already terminating, do nothing */ |
1217 | if (terminating) |
1218 | return; |
1219 | |
1220 | |
1221 | for (i = NVMET_NR_QUEUES; i >= 0; i--) { |
1222 | rcu_read_lock(); |
1223 | queue = rcu_dereference(assoc->queues[i]); |
1224 | if (!queue) { |
1225 | rcu_read_unlock(); |
1226 | continue; |
1227 | } |
1228 | |
1229 | if (!nvmet_fc_tgt_q_get(queue)) { |
1230 | rcu_read_unlock(); |
1231 | continue; |
1232 | } |
1233 | rcu_read_unlock(); |
1234 | nvmet_fc_delete_target_queue(queue); |
1235 | nvmet_fc_tgt_q_put(queue); |
1236 | } |
1237 | |
1238 | dev_info(tgtport->dev, |
1239 | "{%d:%d} Association deleted\n" , |
1240 | tgtport->fc_target_port.port_num, assoc->a_id); |
1241 | |
1242 | nvmet_fc_tgt_a_put(assoc); |
1243 | } |
1244 | |
1245 | static struct nvmet_fc_tgt_assoc * |
1246 | nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, |
1247 | u64 association_id) |
1248 | { |
1249 | struct nvmet_fc_tgt_assoc *assoc; |
1250 | struct nvmet_fc_tgt_assoc *ret = NULL; |
1251 | |
1252 | rcu_read_lock(); |
1253 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1254 | if (association_id == assoc->association_id) { |
1255 | ret = assoc; |
1256 | if (!nvmet_fc_tgt_a_get(assoc)) |
1257 | ret = NULL; |
1258 | break; |
1259 | } |
1260 | } |
1261 | rcu_read_unlock(); |
1262 | |
1263 | return ret; |
1264 | } |
1265 | |
1266 | static void |
1267 | nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, |
1268 | struct nvmet_fc_port_entry *pe, |
1269 | struct nvmet_port *port) |
1270 | { |
1271 | lockdep_assert_held(&nvmet_fc_tgtlock); |
1272 | |
1273 | pe->tgtport = tgtport; |
1274 | tgtport->pe = pe; |
1275 | |
1276 | pe->port = port; |
1277 | port->priv = pe; |
1278 | |
1279 | pe->node_name = tgtport->fc_target_port.node_name; |
1280 | pe->port_name = tgtport->fc_target_port.port_name; |
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1284 | } |
1285 | |
1286 | static void |
1287 | nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) |
1288 | { |
1289 | unsigned long flags; |
1290 | |
1291 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1292 | if (pe->tgtport) |
1293 | pe->tgtport->pe = NULL; |
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1296 | } |
1297 | |
1298 | /* |
1299 | * called when a targetport deregisters. Breaks the relationship |
1300 | * with the nvmet port, but leaves the port_entry in place so that |
1301 | * re-registration can resume operation. |
1302 | */ |
1303 | static void |
1304 | nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) |
1305 | { |
1306 | struct nvmet_fc_port_entry *pe; |
1307 | unsigned long flags; |
1308 | |
1309 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1310 | pe = tgtport->pe; |
1311 | if (pe) |
1312 | pe->tgtport = NULL; |
1313 | tgtport->pe = NULL; |
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1315 | } |
1316 | |
1317 | /* |
1318 | * called when a new targetport is registered. Looks in the |
1319 | * existing nvmet port_entries to see if the nvmet layer is |
1320 | * configured for the targetport's wwn's. (the targetport existed, |
1321 | * nvmet configured, the lldd unregistered the tgtport, and is now |
1322 | * reregistering the same targetport). If so, set the nvmet port |
1323 | * port entry on the targetport. |
1324 | */ |
1325 | static void |
1326 | nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) |
1327 | { |
1328 | struct nvmet_fc_port_entry *pe; |
1329 | unsigned long flags; |
1330 | |
1331 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1332 | list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { |
1333 | if (tgtport->fc_target_port.node_name == pe->node_name && |
1334 | tgtport->fc_target_port.port_name == pe->port_name) { |
1335 | WARN_ON(pe->tgtport); |
1336 | tgtport->pe = pe; |
1337 | pe->tgtport = tgtport; |
1338 | break; |
1339 | } |
1340 | } |
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1342 | } |
1343 | |
1344 | /** |
1345 | * nvmet_fc_register_targetport - transport entry point called by an |
1346 | * LLDD to register the existence of a local |
 *                              NVME subsystem FC port.
1348 | * @pinfo: pointer to information about the port to be registered |
1349 | * @template: LLDD entrypoints and operational parameters for the port |
1350 | * @dev: physical hardware device node port corresponds to. Will be |
1351 | * used for DMA mappings |
1352 | * @portptr: pointer to a local port pointer. Upon success, the routine |
1353 | * will allocate a nvme_fc_local_port structure and place its |
1354 | * address in the local port pointer. Upon failure, local port |
1355 | * pointer will be set to NULL. |
1356 | * |
1357 | * Returns: |
1358 | * a completion status. Must be 0 upon success; a negative errno |
1359 | * (ex: -ENXIO) upon failure. |
1360 | */ |
1361 | int |
1362 | nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, |
1363 | struct nvmet_fc_target_template *template, |
1364 | struct device *dev, |
1365 | struct nvmet_fc_target_port **portptr) |
1366 | { |
1367 | struct nvmet_fc_tgtport *newrec; |
1368 | unsigned long flags; |
1369 | int ret, idx; |
1370 | |
1371 | if (!template->xmt_ls_rsp || !template->fcp_op || |
1372 | !template->fcp_abort || |
1373 | !template->fcp_req_release || !template->targetport_delete || |
1374 | !template->max_hw_queues || !template->max_sgl_segments || |
1375 | !template->max_dif_sgl_segments || !template->dma_boundary) { |
1376 | ret = -EINVAL; |
1377 | goto out_regtgt_failed; |
1378 | } |
1379 | |
	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1381 | GFP_KERNEL); |
1382 | if (!newrec) { |
1383 | ret = -ENOMEM; |
1384 | goto out_regtgt_failed; |
1385 | } |
1386 | |
	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
1388 | if (idx < 0) { |
1389 | ret = -ENOSPC; |
1390 | goto out_fail_kfree; |
1391 | } |
1392 | |
1393 | if (!get_device(dev) && dev) { |
1394 | ret = -ENODEV; |
1395 | goto out_ida_put; |
1396 | } |
1397 | |
1398 | newrec->fc_target_port.node_name = pinfo->node_name; |
1399 | newrec->fc_target_port.port_name = pinfo->port_name; |
1400 | if (template->target_priv_sz) |
1401 | newrec->fc_target_port.private = &newrec[1]; |
1402 | else |
1403 | newrec->fc_target_port.private = NULL; |
1404 | newrec->fc_target_port.port_id = pinfo->port_id; |
1405 | newrec->fc_target_port.port_num = idx; |
	INIT_LIST_HEAD(&newrec->tgt_list);
1407 | newrec->dev = dev; |
1408 | newrec->ops = template; |
1409 | spin_lock_init(&newrec->lock); |
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
1417 | newrec->max_sg_cnt = template->max_sgl_segments; |
1418 | |
	ret = nvmet_fc_alloc_ls_iodlist(newrec);
1420 | if (ret) { |
1421 | ret = -ENOMEM; |
1422 | goto out_free_newrec; |
1423 | } |
1424 | |
	nvmet_fc_portentry_rebind_tgt(newrec);
1426 | |
1427 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1430 | |
1431 | *portptr = &newrec->fc_target_port; |
1432 | return 0; |
1433 | |
1434 | out_free_newrec: |
1435 | put_device(dev); |
1436 | out_ida_put: |
	ida_free(&nvmet_fc_tgtport_cnt, idx);
1438 | out_fail_kfree: |
	kfree(newrec);
1440 | out_regtgt_failed: |
1441 | *portptr = NULL; |
1442 | return ret; |
1443 | } |
1444 | EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); |
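
/*
 * Minimal registration sketch for an LLDD (illustrative only; the ops
 * and limits shown are placeholders, not a real driver):
 *
 *	static struct nvmet_fc_target_template my_tgt_template = {
 *		.targetport_delete	= my_targetport_delete,
 *		.xmt_ls_rsp		= my_xmt_ls_rsp,
 *		.fcp_op			= my_fcp_op,
 *		.fcp_abort		= my_fcp_abort,
 *		.fcp_req_release	= my_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_target_port *tgt_port;
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn, .port_name = wwpn, .port_id = did,
 *	};
 *	int ret = nvmet_fc_register_targetport(&pinfo, &my_tgt_template,
 *					       &pdev->dev, &tgt_port);
 *
 * Every op checked at the top of nvmet_fc_register_targetport() must be
 * non-NULL and the queue/sgl/dma limits non-zero, or -EINVAL is
 * returned.
 */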
1445 | |
1446 | |
1447 | static void |
1448 | nvmet_fc_free_tgtport(struct kref *ref) |
1449 | { |
1450 | struct nvmet_fc_tgtport *tgtport = |
1451 | container_of(ref, struct nvmet_fc_tgtport, ref); |
1452 | struct device *dev = tgtport->dev; |
1453 | unsigned long flags; |
1454 | |
1455 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1458 | |
1459 | nvmet_fc_free_ls_iodlist(tgtport); |
1460 | |
1461 | /* let the LLDD know we've finished tearing it down */ |
1462 | tgtport->ops->targetport_delete(&tgtport->fc_target_port); |
1463 | |
	ida_free(&nvmet_fc_tgtport_cnt,
		tgtport->fc_target_port.port_num);
1466 | |
	ida_destroy(&tgtport->assoc_cnt);
1468 | |
	kfree(tgtport);
1470 | |
1471 | put_device(dev); |
1472 | } |
1473 | |
1474 | static void |
1475 | nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) |
1476 | { |
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1478 | } |
1479 | |
1480 | static int |
1481 | nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) |
1482 | { |
	return kref_get_unless_zero(&tgtport->ref);
1484 | } |
1485 | |
1486 | static void |
1487 | __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) |
1488 | { |
1489 | struct nvmet_fc_tgt_assoc *assoc; |
1490 | |
1491 | rcu_read_lock(); |
1492 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1493 | if (!nvmet_fc_tgt_a_get(assoc)) |
1494 | continue; |
		if (!queue_work(nvmet_wq, &assoc->del_work))
1496 | /* already deleting - release local reference */ |
1497 | nvmet_fc_tgt_a_put(assoc); |
1498 | } |
1499 | rcu_read_unlock(); |
1500 | } |
1501 | |
1502 | /** |
1503 | * nvmet_fc_invalidate_host - transport entry point called by an LLDD |
1504 | * to remove references to a hosthandle for LS's. |
1505 | * |
1506 | * The nvmet-fc layer ensures that any references to the hosthandle |
1507 | * on the targetport are forgotten (set to NULL). The LLDD will |
1508 | * typically call this when a login with a remote host port has been |
1509 | * lost, thus LS's for the remote host port are no longer possible. |
1510 | * |
1511 | * If an LS request is outstanding to the targetport/hosthandle (or |
1512 | * issued concurrently with the call to invalidate the host), the |
1513 | * LLDD is responsible for terminating/aborting the LS and completing |
1514 | * the LS request. It is recommended that these terminations/aborts |
1515 | * occur after calling to invalidate the host handle to avoid additional |
1516 | * retries by the nvmet-fc transport. The nvmet-fc transport may |
1517 | * continue to reference host handle while it cleans up outstanding |
1518 | * NVME associations. The nvmet-fc transport will call the |
1519 | * ops->host_release() callback to notify the LLDD that all references |
1520 | * are complete and the related host handle can be recovered. |
1521 | * Note: if there are no references, the callback may be called before |
1522 | * the invalidate host call returns. |
1523 | * |
1524 | * @target_port: pointer to the (registered) target port that a prior |
1525 | * LS was received on and which supplied the transport the |
1526 | * hosthandle. |
1527 | * @hosthandle: the handle (pointer) that represents the host port |
1528 | * that no longer has connectivity and that LS's should |
1529 | * no longer be directed to. |
1530 | */ |
1531 | void |
1532 | nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, |
1533 | void *hosthandle) |
1534 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1536 | struct nvmet_fc_tgt_assoc *assoc, *next; |
1537 | unsigned long flags; |
1538 | bool noassoc = true; |
1539 | |
1540 | spin_lock_irqsave(&tgtport->lock, flags); |
1541 | list_for_each_entry_safe(assoc, next, |
1542 | &tgtport->assoc_list, a_list) { |
1543 | if (!assoc->hostport || |
1544 | assoc->hostport->hosthandle != hosthandle) |
1545 | continue; |
1546 | if (!nvmet_fc_tgt_a_get(assoc)) |
1547 | continue; |
1548 | assoc->hostport->invalid = 1; |
1549 | noassoc = false; |
		if (!queue_work(nvmet_wq, &assoc->del_work))
1551 | /* already deleting - release local reference */ |
1552 | nvmet_fc_tgt_a_put(assoc); |
1553 | } |
	spin_unlock_irqrestore(&tgtport->lock, flags);
1555 | |
1556 | /* if there's nothing to wait for - call the callback */ |
1557 | if (noassoc && tgtport->ops->host_release) |
1558 | tgtport->ops->host_release(hosthandle); |
1559 | } |
1560 | EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); |
1561 | |
1562 | /* |
1563 | * nvmet layer has called to terminate an association |
1564 | */ |
1565 | static void |
1566 | nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) |
1567 | { |
1568 | struct nvmet_fc_tgtport *tgtport, *next; |
1569 | struct nvmet_fc_tgt_assoc *assoc; |
1570 | struct nvmet_fc_tgt_queue *queue; |
1571 | unsigned long flags; |
1572 | bool found_ctrl = false; |
1573 | |
1574 | /* this is a bit ugly, but don't want to make locks layered */ |
1575 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1576 | list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, |
1577 | tgt_list) { |
1578 | if (!nvmet_fc_tgtport_get(tgtport)) |
1579 | continue; |
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1581 | |
1582 | rcu_read_lock(); |
1583 | list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { |
1584 | queue = rcu_dereference(assoc->queues[0]); |
1585 | if (queue && queue->nvme_sq.ctrl == ctrl) { |
1586 | if (nvmet_fc_tgt_a_get(assoc)) |
1587 | found_ctrl = true; |
1588 | break; |
1589 | } |
1590 | } |
1591 | rcu_read_unlock(); |
1592 | |
1593 | nvmet_fc_tgtport_put(tgtport); |
1594 | |
1595 | if (found_ctrl) { |
			if (!queue_work(nvmet_wq, &assoc->del_work))
1597 | /* already deleting - release local reference */ |
1598 | nvmet_fc_tgt_a_put(assoc); |
1599 | return; |
1600 | } |
1601 | |
1602 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
1603 | } |
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1605 | } |
1606 | |
1607 | /** |
1608 | * nvmet_fc_unregister_targetport - transport entry point called by an |
1609 | * LLDD to deregister/remove a previously |
 *                              registered local NVME subsystem FC port.
1611 | * @target_port: pointer to the (registered) target port that is to be |
1612 | * deregistered. |
1613 | * |
1614 | * Returns: |
1615 | * a completion status. Must be 0 upon success; a negative errno |
1616 | * (ex: -ENXIO) upon failure. |
1617 | */ |
1618 | int |
1619 | nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) |
1620 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1622 | |
1623 | nvmet_fc_portentry_unbind_tgt(tgtport); |
1624 | |
1625 | /* terminate any outstanding associations */ |
1626 | __nvmet_fc_free_assocs(tgtport); |
1627 | |
1628 | /* |
1629 | * should terminate LS's as well. However, LS's will be generated |
1630 | * at the tail end of association termination, so they likely don't |
1631 | * exist yet. And even if they did, it's worthwhile to just let |
1632 | * them finish and targetport ref counting will clean things up. |
1633 | */ |
1634 | |
1635 | nvmet_fc_tgtport_put(tgtport); |
1636 | |
1637 | return 0; |
1638 | } |
1639 | EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); |
1640 | |
1641 | |
1642 | /* ********************** FC-NVME LS RCV Handling ************************* */ |
1643 | |
1644 | |
1645 | static void |
1646 | nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, |
1647 | struct nvmet_fc_ls_iod *iod) |
1648 | { |
1649 | struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; |
1650 | struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; |
1651 | struct nvmet_fc_tgt_queue *queue; |
1652 | int ret = 0; |
1653 | |
1654 | memset(acc, 0, sizeof(*acc)); |
1655 | |
1656 | /* |
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding for the Create Association Cmd descriptor,
	 * as the spec's stated size was incorrect.
1660 | * Accept anything of "minimum" length. Assume format per 1.15 |
1661 | * spec (with HOSTID reduced to 16 bytes), ignore how long the |
1662 | * trailing pad length is. |
1663 | */ |
1664 | if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) |
1665 | ret = VERR_CR_ASSOC_LEN; |
1666 | else if (be32_to_cpu(rqst->desc_list_len) < |
1667 | FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) |
1668 | ret = VERR_CR_ASSOC_RQST_LEN; |
1669 | else if (rqst->assoc_cmd.desc_tag != |
1670 | cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) |
1671 | ret = VERR_CR_ASSOC_CMD; |
1672 | else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < |
1673 | FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) |
1674 | ret = VERR_CR_ASSOC_CMD_LEN; |
1675 | else if (!rqst->assoc_cmd.ersp_ratio || |
1676 | (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= |
1677 | be16_to_cpu(rqst->assoc_cmd.sqsize))) |
1678 | ret = VERR_ERSP_RATIO; |
1679 | |
1680 | else { |
1681 | /* new association w/ admin queue */ |
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
1684 | if (!iod->assoc) |
1685 | ret = VERR_ASSOC_ALLOC_FAIL; |
1686 | else { |
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1688 | be16_to_cpu(rqst->assoc_cmd.sqsize)); |
1689 | if (!queue) { |
1690 | ret = VERR_QUEUE_ALLOC_FAIL; |
				nvmet_fc_tgt_a_put(iod->assoc);
1692 | } |
1693 | } |
1694 | } |
1695 | |
1696 | if (ret) { |
1697 | dev_err(tgtport->dev, |
1698 | "Create Association LS failed: %s\n" , |
1699 | validation_errors[ret]); |
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
1704 | return; |
1705 | } |
1706 | |
1707 | queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); |
	atomic_set(&queue->connected, 1);
1709 | queue->sqhd = 0; /* best place to init value */ |
1710 | |
1711 | dev_info(tgtport->dev, |
1712 | "{%d:%d} Association created\n" , |
1713 | tgtport->fc_target_port.port_num, iod->assoc->a_id); |
1714 | |
1715 | /* format a response */ |
1716 | |
1717 | iod->lsrsp->rsplen = sizeof(*acc); |
1718 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
1727 | acc->associd.association_id = |
1728 | cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); |
1729 | acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); |
1730 | acc->connectid.desc_len = |
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
1733 | acc->connectid.connection_id = acc->associd.association_id; |
1734 | } |
1735 | |
1736 | static void |
1737 | nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, |
1738 | struct nvmet_fc_ls_iod *iod) |
1739 | { |
1740 | struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; |
1741 | struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; |
1742 | struct nvmet_fc_tgt_queue *queue; |
1743 | int ret = 0; |
1744 | |
1745 | memset(acc, 0, sizeof(*acc)); |
1746 | |
1747 | if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) |
1748 | ret = VERR_CR_CONN_LEN; |
1749 | else if (rqst->desc_list_len != |
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1752 | ret = VERR_CR_CONN_RQST_LEN; |
1753 | else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) |
1754 | ret = VERR_ASSOC_ID; |
1755 | else if (rqst->associd.desc_len != |
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
1758 | ret = VERR_ASSOC_ID_LEN; |
1759 | else if (rqst->connect_cmd.desc_tag != |
1760 | cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) |
1761 | ret = VERR_CR_CONN_CMD; |
1762 | else if (rqst->connect_cmd.desc_len != |
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1765 | ret = VERR_CR_CONN_CMD_LEN; |
1766 | else if (!rqst->connect_cmd.ersp_ratio || |
1767 | (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= |
1768 | be16_to_cpu(rqst->connect_cmd.sqsize))) |
1769 | ret = VERR_ERSP_RATIO; |
1770 | |
1771 | else { |
1772 | /* new io queue */ |
1773 | iod->assoc = nvmet_fc_find_target_assoc(tgtport, |
1774 | be64_to_cpu(rqst->associd.association_id)); |
1775 | if (!iod->assoc) |
1776 | ret = VERR_NO_ASSOC; |
1777 | else { |
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1779 | be16_to_cpu(rqst->connect_cmd.qid), |
1780 | be16_to_cpu(rqst->connect_cmd.sqsize)); |
1781 | if (!queue) |
1782 | ret = VERR_QUEUE_ALLOC_FAIL; |
1783 | |
1784 | /* release get taken in nvmet_fc_find_target_assoc */ |
			nvmet_fc_tgt_a_put(iod->assoc);
1786 | } |
1787 | } |
1788 | |
1789 | if (ret) { |
1790 | dev_err(tgtport->dev, |
1791 | "Create Connection LS failed: %s\n" , |
1792 | validation_errors[ret]); |
1793 | iod->lsrsp->rsplen = nvme_fc_format_rjt(buf: acc, |
1794 | buflen: sizeof(*acc), ls_cmd: rqst->w0.ls_cmd, |
1795 | reason: (ret == VERR_NO_ASSOC) ? |
1796 | FCNVME_RJT_RC_INV_ASSOC : |
1797 | FCNVME_RJT_RC_LOGIC, |
1798 | explanation: FCNVME_RJT_EXP_NONE, vendor: 0); |
1799 | return; |
1800 | } |
1801 | |
1802 | queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); |
	atomic_set(&queue->connected, 1);
1804 | queue->sqhd = 0; /* best place to init value */ |
1805 | |
1806 | /* format a response */ |
1807 | |
1808 | iod->lsrsp->rsplen = sizeof(*acc); |
1809 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
1817 | acc->connectid.connection_id = |
1818 | cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, |
1819 | be16_to_cpu(rqst->connect_cmd.qid))); |
1820 | } |
1821 | |
1822 | /* |
 * Returns true if the LS response is to be transmitted
1824 | * Returns false if the LS response is to be delayed |
1825 | */ |
1826 | static int |
1827 | nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, |
1828 | struct nvmet_fc_ls_iod *iod) |
1829 | { |
1830 | struct fcnvme_ls_disconnect_assoc_rqst *rqst = |
1831 | &iod->rqstbuf->rq_dis_assoc; |
1832 | struct fcnvme_ls_disconnect_assoc_acc *acc = |
1833 | &iod->rspbuf->rsp_dis_assoc; |
1834 | struct nvmet_fc_tgt_assoc *assoc = NULL; |
1835 | struct nvmet_fc_ls_iod *oldls = NULL; |
1836 | unsigned long flags; |
1837 | int ret = 0; |
1838 | |
1839 | memset(acc, 0, sizeof(*acc)); |
1840 | |
	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1842 | if (!ret) { |
1843 | /* match an active association - takes an assoc ref if !NULL */ |
1844 | assoc = nvmet_fc_find_target_assoc(tgtport, |
1845 | be64_to_cpu(rqst->associd.association_id)); |
1846 | iod->assoc = assoc; |
1847 | if (!assoc) |
1848 | ret = VERR_NO_ASSOC; |
1849 | } |
1850 | |
1851 | if (ret || !assoc) { |
1852 | dev_err(tgtport->dev, |
1853 | "Disconnect LS failed: %s\n" , |
1854 | validation_errors[ret]); |
1855 | iod->lsrsp->rsplen = nvme_fc_format_rjt(buf: acc, |
1856 | buflen: sizeof(*acc), ls_cmd: rqst->w0.ls_cmd, |
1857 | reason: (ret == VERR_NO_ASSOC) ? |
1858 | FCNVME_RJT_RC_INV_ASSOC : |
1859 | FCNVME_RJT_RC_LOGIC, |
1860 | explanation: FCNVME_RJT_EXP_NONE, vendor: 0); |
1861 | return true; |
1862 | } |
1863 | |
1864 | /* format a response */ |
1865 | |
1866 | iod->lsrsp->rsplen = sizeof(*acc); |
1867 | |
	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);
1872 | |
1873 | /* release get taken in nvmet_fc_find_target_assoc */ |
1874 | nvmet_fc_tgt_a_put(assoc); |
1875 | |
1876 | /* |
1877 | * The rules for LS response says the response cannot |
1878 | * go back until ABTS's have been sent for all outstanding |
1879 | * I/O and a Disconnect Association LS has been sent. |
1880 | * So... save off the Disconnect LS to send the response |
1881 | * later. If there was a prior LS already saved, replace |
1882 | * it with the newer one and send a can't perform reject |
1883 | * on the older one. |
1884 | */ |
1885 | spin_lock_irqsave(&tgtport->lock, flags); |
1886 | oldls = assoc->rcv_disconn; |
1887 | assoc->rcv_disconn = iod; |
	spin_unlock_irqrestore(&tgtport->lock, flags);
1889 | |
1890 | nvmet_fc_delete_target_assoc(assoc); |
1891 | |
1892 | if (oldls) { |
		dev_info(tgtport->dev,
			"{%d:%d} Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*iod->rspbuf),
						/* ok to use rqst, LS is same */
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1905 | } |
1906 | |
1907 | return false; |
1908 | } |
1909 | |
1910 | |
1911 | /* *********************** NVME Ctrl Routines **************************** */ |
1912 | |
1913 | |
1914 | static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); |
1915 | |
1916 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; |
1917 | |
1918 | static void |
1919 | nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) |
1920 | { |
1921 | struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; |
1922 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
1923 | |
	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1926 | nvmet_fc_free_ls_iod(tgtport, iod); |
1927 | nvmet_fc_tgtport_put(tgtport); |
1928 | } |
1929 | |
1930 | static void |
1931 | nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, |
1932 | struct nvmet_fc_ls_iod *iod) |
1933 | { |
1934 | int ret; |
1935 | |
	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1938 | |
1939 | ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); |
1940 | if (ret) |
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1942 | } |
1943 | |
1944 | /* |
 * Actual processing routine for received FC-NVME LS Requests from the LLDD
1946 | */ |
1947 | static void |
1948 | nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, |
1949 | struct nvmet_fc_ls_iod *iod) |
1950 | { |
1951 | struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; |
1952 | bool sendrsp = true; |
1953 | |
1954 | iod->lsrsp->nvme_fc_private = iod; |
1955 | iod->lsrsp->rspbuf = iod->rspbuf; |
1956 | iod->lsrsp->rspdma = iod->rspdma; |
1957 | iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; |
	/* Be preventive: handlers will later set the valid length */
1959 | iod->lsrsp->rsplen = 0; |
1960 | |
1961 | iod->assoc = NULL; |
1962 | |
1963 | /* |
1964 | * handlers: |
1965 | * parse request input, execute the request, and format the |
1966 | * LS response |
1967 | */ |
1968 | switch (w0->ls_cmd) { |
1969 | case FCNVME_LS_CREATE_ASSOCIATION: |
1970 | /* Creates Association and initial Admin Queue/Connection */ |
1971 | nvmet_fc_ls_create_association(tgtport, iod); |
1972 | break; |
1973 | case FCNVME_LS_CREATE_CONNECTION: |
1974 | /* Creates an IO Queue/Connection */ |
1975 | nvmet_fc_ls_create_connection(tgtport, iod); |
1976 | break; |
1977 | case FCNVME_LS_DISCONNECT_ASSOC: |
1978 | /* Terminate a Queue/Connection or the Association */ |
1979 | sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); |
1980 | break; |
1981 | default: |
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1985 | } |
1986 | |
1987 | if (sendrsp) |
1988 | nvmet_fc_xmt_ls_rsp(tgtport, iod); |
1989 | } |
1990 | |
1991 | /* |
1992 | * Actual processing routine for received FC-NVME LS Requests from the LLD |
1993 | */ |
1994 | static void |
1995 | nvmet_fc_handle_ls_rqst_work(struct work_struct *work) |
1996 | { |
1997 | struct nvmet_fc_ls_iod *iod = |
1998 | container_of(work, struct nvmet_fc_ls_iod, work); |
1999 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; |
2000 | |
2001 | nvmet_fc_handle_ls_rqst(tgtport, iod); |
2002 | } |
2003 | |
2004 | |
2005 | /** |
2006 | * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD |
 *                       upon the reception of an NVME LS request.
2008 | * |
2009 | * The nvmet-fc layer will copy payload to an internal structure for |
2010 | * processing. As such, upon completion of the routine, the LLDD may |
2011 | * immediately free/reuse the LS request buffer passed in the call. |
2012 | * |
2013 | * If this routine returns error, the LLDD should abort the exchange. |
2014 | * |
2015 | * @target_port: pointer to the (registered) target port the LS was |
2016 | * received on. |
2017 | * @hosthandle: pointer to the host specific data, gets stored in iod. |
2018 | * @lsrsp: pointer to a lsrsp structure to be used to reference |
2019 | * the exchange corresponding to the LS. |
2020 | * @lsreqbuf: pointer to the buffer containing the LS Request |
2021 | * @lsreqbuf_len: length, in bytes, of the received LS request |
2022 | */ |
2023 | int |
2024 | nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, |
2025 | void *hosthandle, |
2026 | struct nvmefc_ls_rsp *lsrsp, |
2027 | void *lsreqbuf, u32 lsreqbuf_len) |
2028 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2030 | struct nvmet_fc_ls_iod *iod; |
2031 | struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; |
2032 | |
2033 | if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { |
2034 | dev_info(tgtport->dev, |
2035 | "RCV %s LS failed: payload too large (%d)\n" , |
2036 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
2037 | nvmefc_ls_names[w0->ls_cmd] : "" , |
2038 | lsreqbuf_len); |
2039 | return -E2BIG; |
2040 | } |
2041 | |
2042 | if (!nvmet_fc_tgtport_get(tgtport)) { |
2043 | dev_info(tgtport->dev, |
2044 | "RCV %s LS failed: target deleting\n" , |
2045 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
2046 | nvmefc_ls_names[w0->ls_cmd] : "" ); |
2047 | return -ESHUTDOWN; |
2048 | } |
2049 | |
2050 | iod = nvmet_fc_alloc_ls_iod(tgtport); |
2051 | if (!iod) { |
2052 | dev_info(tgtport->dev, |
2053 | "RCV %s LS failed: context allocation failed\n" , |
2054 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? |
2055 | nvmefc_ls_names[w0->ls_cmd] : "" ); |
2056 | nvmet_fc_tgtport_put(tgtport); |
2057 | return -ENOENT; |
2058 | } |
2059 | |
2060 | iod->lsrsp = lsrsp; |
2061 | iod->fcpreq = NULL; |
2062 | memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); |
2063 | iod->rqstdatalen = lsreqbuf_len; |
2064 | iod->hosthandle = hosthandle; |
2065 | |
	queue_work(nvmet_wq, &iod->work);
2067 | |
2068 | return 0; |
2069 | } |
2070 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); |
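
/*
 * Illustrative sketch (not part of the transport): how an LLDD's ELS
 * receive path might hand an FC-NVME LS frame to nvmet_fc_rcv_ls_req().
 * The example_lport/example_exchange structures and the abort helper
 * below are hypothetical; only the nvmet_fc_rcv_ls_req() call and its
 * contract are real.
 */
#if 0
static void example_lldd_recv_els(struct example_lport *lp,
				  struct example_exchange *xchg,
				  void *payload, u32 payload_len)
{
	int ret;

	/* hand the LS to nvmet-fc; it copies the payload internally */
	ret = nvmet_fc_rcv_ls_req(lp->targetport, xchg->hosthandle,
				  &xchg->lsrsp, payload, payload_len);
	if (ret)
		/* the LS was not accepted - abort the exchange */
		example_lldd_abort_exchange(lp, xchg);

	/* on success the payload buffer may be freed/reused immediately */
}
#endif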
2071 | |
2072 | |
2073 | /* |
2074 | * ********************** |
2075 | * Start of FCP handling |
2076 | * ********************** |
2077 | */ |
2078 | |
2079 | static int |
2080 | nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
2081 | { |
2082 | struct scatterlist *sg; |
2083 | unsigned int nent; |
2084 | |
	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2086 | if (!sg) |
2087 | goto out; |
2088 | |
2089 | fod->data_sg = sg; |
2090 | fod->data_sg_cnt = nent; |
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
2093 | DMA_FROM_DEVICE : DMA_TO_DEVICE)); |
2094 | /* note: write from initiator perspective */ |
2095 | fod->next_sg = fod->data_sg; |
2096 | |
2097 | return 0; |
2098 | |
2099 | out: |
2100 | return NVME_SC_INTERNAL; |
2101 | } |
2102 | |
2103 | static void |
2104 | nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) |
2105 | { |
2106 | if (!fod->data_sg || !fod->data_sg_cnt) |
2107 | return; |
2108 | |
	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
2113 | fod->data_sg = NULL; |
2114 | fod->data_sg_cnt = 0; |
2115 | } |
2116 | |
2117 | |
2118 | static bool |
2119 | queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) |
2120 | { |
2121 | u32 sqtail, used; |
2122 | |
2123 | /* egad, this is ugly. And sqtail is just a best guess */ |
	sqtail = atomic_read(&q->sqtail) % q->sqsize;
2125 | |
2126 | used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); |
2127 | return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); |
2128 | } |
2129 | |
2130 | /* |
2131 | * Prep RSP payload. |
2132 | * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op |
2133 | */ |
2134 | static void |
2135 | nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
2136 | struct nvmet_fc_fcp_iod *fod) |
2137 | { |
2138 | struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; |
2139 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
2140 | struct nvme_completion *cqe = &ersp->cqe; |
2141 | u32 *cqewd = (u32 *)cqe; |
2142 | bool send_ersp = false; |
2143 | u32 rsn, rspcnt, xfr_length; |
2144 | |
2145 | if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) |
2146 | xfr_length = fod->req.transfer_len; |
2147 | else |
2148 | xfr_length = fod->offset; |
2149 | |
2150 | /* |
2151 | * check to see if we can send a 0's rsp. |
2152 | * Note: to send a 0's response, the NVME-FC host transport will |
2153 | * recreate the CQE. The host transport knows: sq id, SQHD (last |
2154 | * seen in an ersp), and command_id. Thus it will create a |
2155 | * zero-filled CQE with those known fields filled in. Transport |
2156 | * must send an ersp for any condition where the cqe won't match |
2157 | * this. |
2158 | * |
2159 | * Here are the FC-NVME mandated cases where we must send an ersp: |
2160 | * every N responses, where N=ersp_ratio |
2161 | * force fabric commands to send ersp's (not in FC-NVME but good |
2162 | * practice) |
2163 | * normal cmds: any time status is non-zero, or status is zero |
2164 | * but words 0 or 1 are non-zero. |
2165 | * the SQ is 90% or more full |
2166 | * the cmd is a fused command |
2167 | * transferred data length not equal to cmd iu length |
2168 | */ |
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    nvme_is_fabrics((struct nvme_command *) sqe) ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2176 | send_ersp = true; |
2177 | |
2178 | /* re-set the fields */ |
2179 | fod->fcpreq->rspaddr = ersp; |
2180 | fod->fcpreq->rspdma = fod->rspdma; |
2181 | |
2182 | if (!send_ersp) { |
2183 | memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); |
2184 | fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; |
2185 | } else { |
2186 | ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); |
		rsn = atomic_inc_return(&fod->queue->rsn);
2188 | ersp->rsn = cpu_to_be32(rsn); |
2189 | ersp->xfrd_len = cpu_to_be32(xfr_length); |
2190 | fod->fcpreq->rsplen = sizeof(*ersp); |
2191 | } |
2192 | |
	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2195 | } |
2196 | |
2197 | static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); |
2198 | |
2199 | static void |
2200 | nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, |
2201 | struct nvmet_fc_fcp_iod *fod) |
2202 | { |
2203 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2204 | |
2205 | /* data no longer needed */ |
2206 | nvmet_fc_free_tgt_pgs(fod); |
2207 | |
2208 | /* |
2209 | * if an ABTS was received or we issued the fcp_abort early |
2210 | * don't call abort routine again. |
2211 | */ |
2212 | /* no need to take lock - lock was taken earlier to get here */ |
2213 | if (!fod->aborted) |
2214 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); |
2215 | |
	nvmet_fc_free_fcp_iod(fod->queue, fod);
2217 | } |
2218 | |
2219 | static void |
2220 | nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, |
2221 | struct nvmet_fc_fcp_iod *fod) |
2222 | { |
2223 | int ret; |
2224 | |
2225 | fod->fcpreq->op = NVMET_FCOP_RSP; |
2226 | fod->fcpreq->timeout = 0; |
2227 | |
2228 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
2229 | |
2230 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
2231 | if (ret) |
2232 | nvmet_fc_abort_op(tgtport, fod); |
2233 | } |
2234 | |
2235 | static void |
2236 | nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, |
2237 | struct nvmet_fc_fcp_iod *fod, u8 op) |
2238 | { |
2239 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2240 | struct scatterlist *sg = fod->next_sg; |
2241 | unsigned long flags; |
2242 | u32 remaininglen = fod->req.transfer_len - fod->offset; |
2243 | u32 tlen = 0; |
2244 | int ret; |
2245 | |
2246 | fcpreq->op = op; |
2247 | fcpreq->offset = fod->offset; |
2248 | fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; |
2249 | |
2250 | /* |
2251 | * for next sequence: |
2252 | * break at a sg element boundary |
2253 | * attempt to keep sequence length capped at |
2254 | * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to |
2255 | * be longer if a single sg element is larger |
2256 | * than that amount. This is done to avoid creating |
2257 | * a new sg list to use for the tgtport api. |
2258 | */ |
2259 | fcpreq->sg = sg; |
2260 | fcpreq->sg_cnt = 0; |
2261 | while (tlen < remaininglen && |
2262 | fcpreq->sg_cnt < tgtport->max_sg_cnt && |
2263 | tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { |
2264 | fcpreq->sg_cnt++; |
2265 | tlen += sg_dma_len(sg); |
2266 | sg = sg_next(sg); |
2267 | } |
2268 | if (tlen < remaininglen && fcpreq->sg_cnt == 0) { |
2269 | fcpreq->sg_cnt++; |
2270 | tlen += min_t(u32, sg_dma_len(sg), remaininglen); |
2271 | sg = sg_next(sg); |
2272 | } |
2273 | if (tlen < remaininglen) |
2274 | fod->next_sg = sg; |
2275 | else |
2276 | fod->next_sg = NULL; |
2277 | |
2278 | fcpreq->transfer_length = tlen; |
2279 | fcpreq->transferred_length = 0; |
2280 | fcpreq->fcp_error = 0; |
2281 | fcpreq->rsplen = 0; |
2282 | |
2283 | /* |
2284 | * If the last READDATA request: check if LLDD supports |
2285 | * combined xfr with response. |
2286 | */ |
2287 | if ((op == NVMET_FCOP_READDATA) && |
2288 | ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && |
2289 | (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { |
2290 | fcpreq->op = NVMET_FCOP_READDATA_RSP; |
2291 | nvmet_fc_prep_fcp_rsp(tgtport, fod); |
2292 | } |
2293 | |
2294 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); |
2295 | if (ret) { |
2296 | /* |
2297 | * should be ok to set w/o lock as its in the thread of |
2298 | * execution (not an async timer routine) and doesn't |
2299 | * contend with any clearing action |
2300 | */ |
2301 | fod->abort = true; |
2302 | |
2303 | if (op == NVMET_FCOP_WRITEDATA) { |
2304 | spin_lock_irqsave(&fod->flock, flags); |
2305 | fod->writedataactive = false; |
2306 | spin_unlock_irqrestore(lock: &fod->flock, flags); |
2307 | nvmet_req_complete(req: &fod->req, status: NVME_SC_INTERNAL); |
2308 | } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { |
2309 | fcpreq->fcp_error = ret; |
2310 | fcpreq->transferred_length = 0; |
2311 | nvmet_fc_xmt_fcp_op_done(fcpreq: fod->fcpreq); |
2312 | } |
2313 | } |
2314 | } |
2315 | |
2316 | static inline bool |
2317 | __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) |
2318 | { |
2319 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2320 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2321 | |
2322 | /* if in the middle of an io and we need to tear down */ |
2323 | if (abort) { |
2324 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { |
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2326 | return true; |
2327 | } |
2328 | |
2329 | nvmet_fc_abort_op(tgtport, fod); |
2330 | return true; |
2331 | } |
2332 | |
2333 | return false; |
2334 | } |
2335 | |
2336 | /* |
2337 | * actual done handler for FCP operations when completed by the lldd |
2338 | */ |
2339 | static void |
2340 | nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) |
2341 | { |
2342 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
2343 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2344 | unsigned long flags; |
2345 | bool abort; |
2346 | |
2347 | spin_lock_irqsave(&fod->flock, flags); |
2348 | abort = fod->abort; |
2349 | fod->writedataactive = false; |
	spin_unlock_irqrestore(&fod->flock, flags);
2351 | |
2352 | switch (fcpreq->op) { |
2353 | |
2354 | case NVMET_FCOP_WRITEDATA: |
2355 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2356 | return; |
2357 | if (fcpreq->fcp_error || |
2358 | fcpreq->transferred_length != fcpreq->transfer_length) { |
2359 | spin_lock_irqsave(&fod->flock, flags); |
2360 | fod->abort = true; |
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2364 | return; |
2365 | } |
2366 | |
2367 | fod->offset += fcpreq->transferred_length; |
2368 | if (fod->offset != fod->req.transfer_len) { |
2369 | spin_lock_irqsave(&fod->flock, flags); |
2370 | fod->writedataactive = true; |
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
2376 | return; |
2377 | } |
2378 | |
2379 | /* data transfer complete, resume with nvmet layer */ |
2380 | fod->req.execute(&fod->req); |
2381 | break; |
2382 | |
2383 | case NVMET_FCOP_READDATA: |
2384 | case NVMET_FCOP_READDATA_RSP: |
2385 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2386 | return; |
2387 | if (fcpreq->fcp_error || |
2388 | fcpreq->transferred_length != fcpreq->transfer_length) { |
2389 | nvmet_fc_abort_op(tgtport, fod); |
2390 | return; |
2391 | } |
2392 | |
2393 | /* success */ |
2394 | |
2395 | if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { |
2396 | /* data no longer needed */ |
2397 | nvmet_fc_free_tgt_pgs(fod); |
			nvmet_fc_free_fcp_iod(fod->queue, fod);
2399 | return; |
2400 | } |
2401 | |
2402 | fod->offset += fcpreq->transferred_length; |
2403 | if (fod->offset != fod->req.transfer_len) { |
2404 | /* transfer the next chunk */ |
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
2407 | return; |
2408 | } |
2409 | |
2410 | /* data transfer complete, send response */ |
2411 | |
2412 | /* data no longer needed */ |
2413 | nvmet_fc_free_tgt_pgs(fod); |
2414 | |
2415 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
2416 | |
2417 | break; |
2418 | |
2419 | case NVMET_FCOP_RSP: |
2420 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2421 | return; |
		nvmet_fc_free_fcp_iod(fod->queue, fod);
2423 | break; |
2424 | |
2425 | default: |
2426 | break; |
2427 | } |
2428 | } |
2429 | |
2430 | static void |
2431 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) |
2432 | { |
2433 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
2434 | |
2435 | nvmet_fc_fod_op_done(fod); |
2436 | } |
2437 | |
2438 | /* |
2439 | * actual completion handler after execution by the nvmet layer |
2440 | */ |
2441 | static void |
2442 | __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, |
2443 | struct nvmet_fc_fcp_iod *fod, int status) |
2444 | { |
2445 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; |
2446 | struct nvme_completion *cqe = &fod->rspiubuf.cqe; |
2447 | unsigned long flags; |
2448 | bool abort; |
2449 | |
2450 | spin_lock_irqsave(&fod->flock, flags); |
2451 | abort = fod->abort; |
	spin_unlock_irqrestore(&fod->flock, flags);
2453 | |
2454 | /* if we have a CQE, snoop the last sq_head value */ |
2455 | if (!status) |
2456 | fod->queue->sqhd = cqe->sq_head; |
2457 | |
2458 | if (abort) { |
2459 | nvmet_fc_abort_op(tgtport, fod); |
2460 | return; |
2461 | } |
2462 | |
2463 | /* if an error handling the cmd post initial parsing */ |
2464 | if (status) { |
2465 | /* fudge up a failed CQE status for our transport error */ |
2466 | memset(cqe, 0, sizeof(*cqe)); |
2467 | cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ |
2468 | cqe->sq_id = cpu_to_le16(fod->queue->qid); |
2469 | cqe->command_id = sqe->command_id; |
2470 | cqe->status = cpu_to_le16(status); |
2471 | } else { |
2472 | |
2473 | /* |
2474 | * try to push the data even if the SQE status is non-zero. |
2475 | * There may be a status where data still was intended to |
2476 | * be moved |
2477 | */ |
2478 | if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { |
2479 | /* push the data over before sending rsp */ |
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
2482 | return; |
2483 | } |
2484 | |
2485 | /* writes & no data - fall thru */ |
2486 | } |
2487 | |
2488 | /* data no longer needed */ |
2489 | nvmet_fc_free_tgt_pgs(fod); |
2490 | |
2491 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); |
2492 | } |
2493 | |
2494 | |
2495 | static void |
2496 | nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) |
2497 | { |
2498 | struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); |
2499 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
2500 | |
	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2502 | } |
2503 | |
2504 | |
2505 | /* |
 * Actual processing routine for received FC-NVME I/O Requests from the LLDD
2507 | */ |
2508 | static void |
2509 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
2510 | struct nvmet_fc_fcp_iod *fod) |
2511 | { |
2512 | struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; |
2513 | u32 xfrlen = be32_to_cpu(cmdiu->data_len); |
2514 | int ret; |
2515 | |
2516 | /* |
2517 | * Fused commands are currently not supported in the linux |
2518 | * implementation. |
2519 | * |
2520 | * As such, the implementation of the FC transport does not |
2521 | * look at the fused commands and order delivery to the upper |
2522 | * layer until we have both based on csn. |
2523 | */ |
2524 | |
2525 | fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; |
2526 | |
2527 | if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { |
2528 | fod->io_dir = NVMET_FCP_WRITE; |
		if (!nvme_is_write(&cmdiu->sqe))
2530 | goto transport_error; |
2531 | } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { |
2532 | fod->io_dir = NVMET_FCP_READ; |
		if (nvme_is_write(&cmdiu->sqe))
2534 | goto transport_error; |
2535 | } else { |
2536 | fod->io_dir = NVMET_FCP_NODATA; |
2537 | if (xfrlen) |
2538 | goto transport_error; |
2539 | } |
2540 | |
2541 | fod->req.cmd = &fod->cmdiubuf.sqe; |
2542 | fod->req.cqe = &fod->rspiubuf.cqe; |
2543 | if (tgtport->pe) |
2544 | fod->req.port = tgtport->pe->port; |
2545 | |
2546 | /* clear any response payload */ |
2547 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); |
2548 | |
2549 | fod->data_sg = NULL; |
2550 | fod->data_sg_cnt = 0; |
2551 | |
	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
2556 | if (!ret) { |
2557 | /* bad SQE content or invalid ctrl state */ |
2558 | /* nvmet layer has already called op done to send rsp. */ |
2559 | return; |
2560 | } |
2561 | |
2562 | fod->req.transfer_len = xfrlen; |
2563 | |
2564 | /* keep a running counter of tail position */ |
	atomic_inc(&fod->queue->sqtail);
2566 | |
2567 | if (fod->req.transfer_len) { |
2568 | ret = nvmet_fc_alloc_tgt_pgs(fod); |
2569 | if (ret) { |
			nvmet_req_complete(&fod->req, ret);
2571 | return; |
2572 | } |
2573 | } |
2574 | fod->req.sg = fod->data_sg; |
2575 | fod->req.sg_cnt = fod->data_sg_cnt; |
2576 | fod->offset = 0; |
2577 | |
2578 | if (fod->io_dir == NVMET_FCP_WRITE) { |
2579 | /* pull the data over before invoking nvmet layer */ |
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2581 | return; |
2582 | } |
2583 | |
2584 | /* |
2585 | * Reads or no data: |
2586 | * |
2587 | * can invoke the nvmet_layer now. If read data, cmd completion will |
2588 | * push the data |
2589 | */ |
2590 | fod->req.execute(&fod->req); |
2591 | return; |
2592 | |
2593 | transport_error: |
2594 | nvmet_fc_abort_op(tgtport, fod); |
2595 | } |
2596 | |
2597 | /** |
2598 | * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD |
 *                       upon the reception of an NVME FCP CMD IU.
2600 | * |
2601 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
2602 | * layer for processing. |
2603 | * |
2604 | * The nvmet_fc layer allocates a local job structure (struct |
2605 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
2606 | * CMD IU buffer to the job structure. As such, on a successful |
2607 | * completion (returns 0), the LLDD may immediately free/reuse |
2608 | * the CMD IU buffer passed in the call. |
2609 | * |
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD (which may issue a hw command to send the response but not
 * receive the hw completion for it before a new command is
 * asynchronously received), it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. This
 * gives the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD IU buffer information are remembered, and
 * the routine returns a -EOVERFLOW status. Subsequently, when a queue
 * job structure is freed, it is immediately reallocated for anything on
 * the pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
2626 | * |
2627 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
2628 | * the completion as successful but must not reuse the CMD IU buffer |
2629 | * until the LLDD's defer_rcv() callback has been called for the |
2630 | * corresponding struct nvmefc_tgt_fcp_req pointer. |
2631 | * |
2632 | * If there is any other condition in which an error occurs, the |
2633 | * transport will return a non-zero status indicating the error. |
2634 | * In all cases other than -EOVERFLOW, the transport has not accepted the |
2635 | * request and the LLDD should abort the exchange. |
2636 | * |
2637 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
2638 | * was received on. |
2639 | * @fcpreq: pointer to a fcpreq request structure to be used to reference |
2640 | * the exchange corresponding to the FCP Exchange. |
2641 | * @cmdiubuf: pointer to the buffer containing the FCP CMD IU |
2642 | * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU |
2643 | */ |
2644 | int |
2645 | nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, |
2646 | struct nvmefc_tgt_fcp_req *fcpreq, |
2647 | void *cmdiubuf, u32 cmdiubuf_len) |
2648 | { |
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2650 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
2651 | struct nvmet_fc_tgt_queue *queue; |
2652 | struct nvmet_fc_fcp_iod *fod; |
2653 | struct nvmet_fc_defer_fcp_req *deferfcp; |
2654 | unsigned long flags; |
2655 | |
2656 | /* validate iu, so the connection id can be used to find the queue */ |
2657 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
2658 | (cmdiu->format_id != NVME_CMD_FORMAT_ID) || |
2659 | (cmdiu->fc_id != NVME_CMD_FC_ID) || |
2660 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) |
2661 | return -EIO; |
2662 | |
2663 | queue = nvmet_fc_find_target_queue(tgtport, |
2664 | be64_to_cpu(cmdiu->connection_id)); |
2665 | if (!queue) |
2666 | return -ENOTCONN; |
2667 | |
2668 | /* |
2669 | * note: reference taken by find_target_queue |
2670 | * After successful fod allocation, the fod will inherit the |
2671 | * ownership of that reference and will remove the reference |
2672 | * when the fod is freed. |
2673 | */ |
2674 | |
2675 | spin_lock_irqsave(&queue->qlock, flags); |
2676 | |
2677 | fod = nvmet_fc_alloc_fcp_iod(queue); |
2678 | if (fod) { |
		spin_unlock_irqrestore(&queue->qlock, flags);
2680 | |
2681 | fcpreq->nvmet_fc_private = fod; |
2682 | fod->fcpreq = fcpreq; |
2683 | |
2684 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); |
2685 | |
2686 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); |
2687 | |
2688 | return 0; |
2689 | } |
2690 | |
2691 | if (!tgtport->ops->defer_rcv) { |
		spin_unlock_irqrestore(&queue->qlock, flags);
2693 | /* release the queue lookup reference */ |
2694 | nvmet_fc_tgt_q_put(queue); |
2695 | return -ENOENT; |
2696 | } |
2697 | |
2698 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
2699 | struct nvmet_fc_defer_fcp_req, req_list); |
2700 | if (deferfcp) { |
2701 | /* Just re-use one that was previously allocated */ |
		list_del(&deferfcp->req_list);
2703 | } else { |
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2708 | if (!deferfcp) { |
2709 | /* release the queue lookup reference */ |
2710 | nvmet_fc_tgt_q_put(queue); |
2711 | return -ENOMEM; |
2712 | } |
2713 | spin_lock_irqsave(&queue->qlock, flags); |
2714 | } |
2715 | |
2716 | /* For now, use rspaddr / rsplen to save payload information */ |
2717 | fcpreq->rspaddr = cmdiubuf; |
2718 | fcpreq->rsplen = cmdiubuf_len; |
2719 | deferfcp->fcp_req = fcpreq; |
2720 | |
2721 | /* defer processing till a fod becomes available */ |
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2723 | |
2724 | /* NOTE: the queue lookup reference is still valid */ |
2725 | |
	spin_unlock_irqrestore(&queue->qlock, flags);
2727 | |
2728 | return -EOVERFLOW; |
2729 | } |
2730 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
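
/*
 * Illustrative sketch (not part of the transport): how an LLDD's FCP
 * receive path might honor the -EOVERFLOW contract described above.
 * The example_lport/example_exchange structures and helpers are
 * hypothetical; only nvmet_fc_rcv_fcp_req() and its return-code
 * semantics are real.
 */
#if 0
static void example_lldd_recv_fcp_cmd(struct example_lport *lp,
				      struct example_exchange *xchg,
				      void *cmdiu, u32 cmdiu_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(lp->targetport, &xchg->tgt_fcp_req,
				   cmdiu, cmdiu_len);
	switch (ret) {
	case 0:
		/* accepted: CMD IU was copied, buffer may be reused now */
		example_lldd_reuse_cmd_buf(lp, cmdiu);
		break;
	case -EOVERFLOW:
		/*
		 * queued awaiting a job structure: treat as success, but
		 * hold the CMD IU buffer until defer_rcv() is called
		 */
		break;
	default:
		/* not accepted - abort the exchange */
		example_lldd_abort_exchange(lp, xchg);
	}
}
#endif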
2731 | |
2732 | /** |
2733 | * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD |
2734 | * upon the reception of an ABTS for a FCP command |
2735 | * |
2736 | * Notify the transport that an ABTS has been received for a FCP command |
2737 | * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The |
2738 | * LLDD believes the command is still being worked on |
2739 | * (template_ops->fcp_req_release() has not been called). |
2740 | * |
2741 | * The transport will wait for any outstanding work (an op to the LLDD, |
2742 | * which the lldd should complete with error due to the ABTS; or the |
2743 | * completion from the nvmet layer of the nvme command), then will |
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2746 | * to the ABTS either after return from this function (assuming any |
2747 | * outstanding op work has been terminated) or upon the callback being |
2748 | * called. |
2749 | * |
2750 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
2751 | * was received on. |
2752 | * @fcpreq: pointer to the fcpreq request structure that corresponds |
2753 | * to the exchange that received the ABTS. |
2754 | */ |
2755 | void |
2756 | nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, |
2757 | struct nvmefc_tgt_fcp_req *fcpreq) |
2758 | { |
2759 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; |
2760 | struct nvmet_fc_tgt_queue *queue; |
2761 | unsigned long flags; |
2762 | |
2763 | if (!fod || fod->fcpreq != fcpreq) |
2764 | /* job appears to have already completed, ignore abort */ |
2765 | return; |
2766 | |
2767 | queue = fod->queue; |
2768 | |
2769 | spin_lock_irqsave(&queue->qlock, flags); |
2770 | if (fod->active) { |
2771 | /* |
2772 | * mark as abort. The abort handler, invoked upon completion |
2773 | * of any work, will detect the aborted status and do the |
2774 | * callback. |
2775 | */ |
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
2782 | } |
2783 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); |
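
/*
 * Illustrative sketch (hypothetical LLDD code): upon an ABTS for an
 * outstanding FCP exchange, notify the transport and defer the BA_ACC
 * until the fcp_req_release() callback returns the context. The
 * structures and flag below are assumptions for illustration only.
 */
#if 0
static void example_lldd_recv_abts(struct example_lport *lp,
				   struct example_exchange *xchg)
{
	/* flags the io as aborted; completion paths do the teardown */
	nvmet_fc_rcv_fcp_abort(lp->targetport, &xchg->tgt_fcp_req);

	/* BA_ACC sent later, once outstanding op work has terminated */
	xchg->send_ba_acc_on_release = true;
}
#endif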
2784 | |
2785 | |
2786 | struct nvmet_fc_traddr { |
2787 | u64 nn; |
2788 | u64 pn; |
2789 | }; |
2790 | |
2791 | static int |
2792 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
2793 | { |
2794 | u64 token64; |
2795 | |
	if (match_u64(sstr, &token64))
2797 | return -EINVAL; |
2798 | *val = token64; |
2799 | |
2800 | return 0; |
2801 | } |
2802 | |
2803 | /* |
2804 | * This routine validates and extracts the WWN's from the TRADDR string. |
2805 | * As kernel parsers need the 0x to determine number base, universally |
2806 | * build string to parse with 0x prefix before parsing name strings. |
2807 | */ |
2808 | static int |
2809 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) |
2810 | { |
2811 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; |
2812 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
2813 | int nnoffset, pnoffset; |
2814 | |
2815 | /* validate if string is one of the 2 allowed formats */ |
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
2827 | nnoffset = NVME_FC_TRADDR_NNLEN; |
2828 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; |
2829 | } else |
2830 | goto out_einval; |
2831 | |
2832 | name[0] = '0'; |
2833 | name[1] = 'x'; |
2834 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; |
2835 | |
2836 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2838 | goto out_einval; |
2839 | |
2840 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); |
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2842 | goto out_einval; |
2843 | |
2844 | return 0; |
2845 | |
2846 | out_einval: |
2847 | pr_warn("%s: bad traddr string\n" , __func__); |
2848 | return -EINVAL; |
2849 | } |
2850 | |
2851 | static int |
2852 | nvmet_fc_add_port(struct nvmet_port *port) |
2853 | { |
2854 | struct nvmet_fc_tgtport *tgtport; |
2855 | struct nvmet_fc_port_entry *pe; |
2856 | struct nvmet_fc_traddr traddr = { 0L, 0L }; |
2857 | unsigned long flags; |
2858 | int ret; |
2859 | |
2860 | /* validate the address info */ |
2861 | if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || |
2862 | (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) |
2863 | return -EINVAL; |
2864 | |
2865 | /* map the traddr address info to a target port */ |
2866 | |
	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
2869 | if (ret) |
2870 | return ret; |
2871 | |
	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2873 | if (!pe) |
2874 | return -ENOMEM; |
2875 | |
2876 | ret = -ENXIO; |
2877 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
2878 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { |
2879 | if ((tgtport->fc_target_port.node_name == traddr.nn) && |
2880 | (tgtport->fc_target_port.port_name == traddr.pn)) { |
2881 | /* a FC port can only be 1 nvmet port id */ |
2882 | if (!tgtport->pe) { |
2883 | nvmet_fc_portentry_bind(tgtport, pe, port); |
2884 | ret = 0; |
2885 | } else |
2886 | ret = -EALREADY; |
2887 | break; |
2888 | } |
2889 | } |
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	if (ret)
		kfree(pe);
2894 | |
2895 | return ret; |
2896 | } |
2897 | |
2898 | static void |
2899 | nvmet_fc_remove_port(struct nvmet_port *port) |
2900 | { |
2901 | struct nvmet_fc_port_entry *pe = port->priv; |
2902 | |
2903 | nvmet_fc_portentry_unbind(pe); |
2904 | |
	kfree(pe);
2906 | } |
2907 | |
2908 | static void |
2909 | nvmet_fc_discovery_chg(struct nvmet_port *port) |
2910 | { |
2911 | struct nvmet_fc_port_entry *pe = port->priv; |
2912 | struct nvmet_fc_tgtport *tgtport = pe->tgtport; |
2913 | |
2914 | if (tgtport && tgtport->ops->discovery_event) |
2915 | tgtport->ops->discovery_event(&tgtport->fc_target_port); |
2916 | } |
2917 | |
2918 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
2919 | .owner = THIS_MODULE, |
2920 | .type = NVMF_TRTYPE_FC, |
2921 | .msdbd = 1, |
2922 | .add_port = nvmet_fc_add_port, |
2923 | .remove_port = nvmet_fc_remove_port, |
2924 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, |
2925 | .delete_ctrl = nvmet_fc_delete_ctrl, |
2926 | .discovery_chg = nvmet_fc_discovery_chg, |
2927 | }; |
2928 | |
2929 | static int __init nvmet_fc_init_module(void) |
2930 | { |
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2932 | } |
2933 | |
2934 | static void __exit nvmet_fc_exit_module(void) |
2935 | { |
2936 | /* sanity check - all lports should be removed */ |
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
2943 | } |
2944 | |
2945 | module_init(nvmet_fc_init_module); |
2946 | module_exit(nvmet_fc_exit_module); |
2947 | |
2948 | MODULE_LICENSE("GPL v2" ); |
2949 | |