1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. |
4 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. |
5 | */ |
6 | #include <linux/mempool.h> |
7 | #include <linux/errno.h> |
8 | #include <linux/init.h> |
9 | #include <linux/workqueue.h> |
10 | #include <linux/pci.h> |
11 | #include <linux/scatterlist.h> |
12 | #include <linux/skbuff.h> |
13 | #include <linux/spinlock.h> |
14 | #include <linux/etherdevice.h> |
15 | #include <linux/if_ether.h> |
16 | #include <linux/if_vlan.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/gfp.h> |
19 | #include <scsi/scsi.h> |
20 | #include <scsi/scsi_host.h> |
21 | #include <scsi/scsi_device.h> |
22 | #include <scsi/scsi_cmnd.h> |
23 | #include <scsi/scsi_tcq.h> |
24 | #include <scsi/fc/fc_els.h> |
25 | #include <scsi/fc/fc_fcoe.h> |
26 | #include <scsi/libfc.h> |
27 | #include <scsi/fc_frame.h> |
28 | #include "fnic_io.h" |
29 | #include "fnic.h" |
30 | |
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
67 | |
68 | const char *fnic_state_to_str(unsigned int state) |
69 | { |
70 | if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) |
		return "unknown";
72 | |
73 | return fnic_state_str[state]; |
74 | } |
75 | |
76 | static const char *fnic_ioreq_state_to_str(unsigned int state) |
77 | { |
78 | if (state >= ARRAY_SIZE(fnic_ioreq_state_str) || |
79 | !fnic_ioreq_state_str[state]) |
		return "unknown";
81 | |
82 | return fnic_ioreq_state_str[state]; |
83 | } |
84 | |
85 | static const char *fnic_fcpio_status_to_str(unsigned int status) |
86 | { |
87 | if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status]) |
		return "unknown";
89 | |
90 | return fcpio_status_str[status]; |
91 | } |
92 | |
93 | static void fnic_cleanup_io(struct fnic *fnic); |
94 | |
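/*
 * Per-IO locking: the driver keeps an array of FNIC_IO_LOCKS spinlocks and
 * picks one by hashing the command's block-layer tag, so commands whose tags
 * collide share a lock while unrelated commands can proceed in parallel.
 */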
95 | static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, |
96 | struct scsi_cmnd *sc) |
97 | { |
	u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
99 | |
100 | return &fnic->io_req_lock[hash]; |
101 | } |
102 | |
103 | static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, |
104 | int tag) |
105 | { |
106 | return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; |
107 | } |
108 | |
109 | /* |
110 | * Unmap the data buffer and sense buffer for an io_req, |
111 | * also unmap and free the device-private scatter/gather list. |
112 | */ |
113 | static void fnic_release_ioreq_buf(struct fnic *fnic, |
114 | struct fnic_io_req *io_req, |
115 | struct scsi_cmnd *sc) |
116 | { |
117 | if (io_req->sgl_list_pa) |
118 | dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, |
119 | sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, |
120 | DMA_TO_DEVICE); |
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
126 | if (io_req->sense_buf_pa) |
127 | dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa, |
128 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); |
129 | } |
130 | |
131 | /* Free up Copy Wq descriptors. Called with copy_wq lock held */ |
132 | static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) |
133 | { |
134 | /* if no Ack received from firmware, then nothing to clean */ |
135 | if (!fnic->fw_ack_recd[0]) |
136 | return 1; |
137 | |
138 | /* |
139 | * Update desc_available count based on number of freed descriptors |
140 | * Account for wraparound |
141 | */ |
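	/*
	 * Example: with desc_count = 64, to_clean_index = 60 and
	 * fw_ack_index = 3, the wraparound branch below reclaims
	 * (64 - 60) + 3 + 1 = 8 descriptors.
	 */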
142 | if (wq->to_clean_index <= fnic->fw_ack_index[0]) |
143 | wq->ring.desc_avail += (fnic->fw_ack_index[0] |
144 | - wq->to_clean_index + 1); |
145 | else |
146 | wq->ring.desc_avail += (wq->ring.desc_count |
147 | - wq->to_clean_index |
148 | + fnic->fw_ack_index[0] + 1); |
149 | |
150 | /* |
151 | * just bump clean index to ack_index+1 accounting for wraparound |
152 | * this will essentially free up all descriptors between |
153 | * to_clean_index and fw_ack_index, both inclusive |
154 | */ |
155 | wq->to_clean_index = |
156 | (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; |
157 | |
158 | /* we have processed the acks received so far */ |
159 | fnic->fw_ack_recd[0] = 0; |
160 | return 0; |
161 | } |
162 | |
163 | |
164 | /* |
165 | * __fnic_set_state_flags |
166 | * Sets/Clears bits in fnic's state_flags |
167 | **/ |
168 | void |
169 | __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, |
170 | unsigned long clearbits) |
171 | { |
172 | unsigned long flags = 0; |
173 | unsigned long host_lock_flags = 0; |
174 | |
175 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
176 | spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags); |
177 | |
178 | if (clearbits) |
179 | fnic->state_flags &= ~st_flags; |
180 | else |
181 | fnic->state_flags |= st_flags; |
182 | |
	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
185 | |
186 | return; |
187 | } |
188 | |
189 | |
190 | /* |
191 | * fnic_fw_reset_handler |
192 | * Routine to send reset msg to fw |
193 | */ |
194 | int fnic_fw_reset_handler(struct fnic *fnic) |
195 | { |
196 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
197 | int ret = 0; |
198 | unsigned long flags; |
199 | |
200 | /* indicate fwreset to io path */ |
201 | fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); |
202 | |
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));
209 | |
210 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
211 | |
212 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
213 | free_wq_copy_descs(fnic, wq); |
214 | |
215 | if (!vnic_wq_copy_desc_avail(wq)) |
216 | ret = -EAGAIN; |
217 | else { |
218 | fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); |
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}
237 | } |
238 | |
239 | return ret; |
240 | } |
241 | |
242 | |
243 | /* |
244 | * fnic_flogi_reg_handler |
245 | * Routine to send flogi register msg to fw |
246 | */ |
247 | int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) |
248 | { |
249 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
250 | enum fcpio_flogi_reg_format_type format; |
251 | struct fc_lport *lp = fnic->lport; |
252 | u8 gw_mac[ETH_ALEN]; |
253 | int ret = 0; |
254 | unsigned long flags; |
255 | |
256 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
257 | |
258 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
259 | free_wq_copy_descs(fnic, wq); |
260 | |
261 | if (!vnic_wq_copy_desc_avail(wq)) { |
262 | ret = -EAGAIN; |
263 | goto flogi_reg_ioreq_end; |
264 | } |
265 | |
266 | if (fnic->ctlr.map_dest) { |
		eth_broadcast_addr(gw_mac);
268 | format = FCPIO_FLOGI_REG_DEF_DEST; |
269 | } else { |
270 | memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); |
271 | format = FCPIO_FLOGI_REG_GW_DEST; |
272 | } |
273 | |
274 | if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { |
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
288 | } |
289 | |
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
298 | return ret; |
299 | } |
300 | |
301 | /* |
302 | * fnic_queue_wq_copy_desc |
303 | * Routine to enqueue a wq copy desc |
304 | */ |
305 | static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, |
306 | struct vnic_wq_copy *wq, |
307 | struct fnic_io_req *io_req, |
308 | struct scsi_cmnd *sc, |
309 | int sg_count) |
310 | { |
311 | struct scatterlist *sg; |
312 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); |
313 | struct fc_rport_libfc_priv *rp = rport->dd_data; |
314 | struct host_sg_desc *desc; |
315 | struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; |
316 | unsigned int i; |
317 | unsigned long intr_flags; |
318 | int flags; |
319 | u8 exch_flags; |
320 | struct scsi_lun fc_lun; |
321 | |
322 | if (sg_count) { |
323 | /* For each SGE, create a device desc entry */ |
324 | desc = io_req->sgl_list; |
325 | for_each_sg(scsi_sglist(sc), sg, sg_count, i) { |
326 | desc->addr = cpu_to_le64(sg_dma_address(sg)); |
327 | desc->len = cpu_to_le32(sg_dma_len(sg)); |
328 | desc->_resvd = 0; |
329 | desc++; |
330 | } |
331 | |
332 | io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev, |
333 | io_req->sgl_list, |
334 | sizeof(io_req->sgl_list[0]) * sg_count, |
335 | DMA_TO_DEVICE); |
		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
			printk(KERN_ERR "DMA mapping failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
340 | } |
341 | |
342 | io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev, |
343 | sc->sense_buffer, |
344 | SCSI_SENSE_BUFFERSIZE, |
345 | DMA_FROM_DEVICE); |
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * sg_count,
				 DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
352 | } |
353 | |
354 | int_to_scsilun(sc->device->lun, &fc_lun); |
355 | |
356 | /* Enqueue the descriptor in the Copy WQ */ |
357 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); |
358 | |
359 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
360 | free_wq_copy_descs(fnic, wq); |
361 | |
362 | if (unlikely(!vnic_wq_copy_desc_avail(wq))) { |
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
368 | } |
369 | |
370 | flags = 0; |
371 | if (sc->sc_data_direction == DMA_FROM_DEVICE) |
372 | flags = FCPIO_ICMND_RDDATA; |
373 | else if (sc->sc_data_direction == DMA_TO_DEVICE) |
374 | flags = FCPIO_ICMND_WRDATA; |
375 | |
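	/*
	 * Request sequence-level error recovery on this exchange only when
	 * both the adapter configuration and the remote port advertise
	 * support for FCP retries.
	 */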
376 | exch_flags = 0; |
377 | if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && |
378 | (rp->flags & FC_RP_FLAGS_RETRY)) |
379 | exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; |
380 | |
	fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
404 | } |
405 | |
406 | /* |
407 | * fnic_queuecommand |
408 | * Routine to send a scsi cdb |
409 | * Called with host_lock held and interrupts disabled. |
410 | */ |
411 | static int fnic_queuecommand_lck(struct scsi_cmnd *sc) |
412 | { |
413 | void (*done)(struct scsi_cmnd *) = scsi_done; |
	const int tag = scsi_cmd_to_rq(sc)->tag;
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
419 | struct fnic_stats *fnic_stats = &fnic->fnic_stats; |
420 | struct vnic_wq_copy *wq; |
421 | int ret; |
422 | u64 cmd_trace; |
423 | int sg_count = 0; |
424 | unsigned long flags = 0; |
425 | unsigned long ptr; |
426 | spinlock_t *io_lock = NULL; |
427 | int io_lock_acquired = 0; |
428 | struct fc_rport_libfc_priv *rp; |
429 | |
430 | if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) |
431 | return SCSI_MLQUEUE_HOST_BUSY; |
432 | |
433 | if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) |
434 | return SCSI_MLQUEUE_HOST_BUSY; |
435 | |
436 | rport = starget_to_rport(scsi_target(sc->device)); |
437 | if (!rport) { |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "returning DID_NO_CONNECT for IO as rport is NULL\n");
440 | sc->result = DID_NO_CONNECT << 16; |
441 | done(sc); |
442 | return 0; |
443 | } |
444 | |
445 | ret = fc_remote_port_chkready(rport); |
446 | if (ret) { |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport is not ready\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
450 | sc->result = ret; |
451 | done(sc); |
452 | return 0; |
453 | } |
454 | |
455 | rp = rport->dd_data; |
456 | if (!rp || rp->rp_state == RPORT_ST_DELETE) { |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x removed, returning DID_NO_CONNECT\n",
			      rport->port_id);

		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT << 16;
463 | done(sc); |
464 | return 0; |
465 | } |
466 | |
467 | if (rp->rp_state != RPORT_ST_READY) { |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
			      rport->port_id, rp->rp_state);
471 | |
472 | sc->result = DID_IMM_RETRY << 16; |
473 | done(sc); |
474 | return 0; |
475 | } |
476 | |
477 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
478 | return SCSI_MLQUEUE_HOST_BUSY; |
479 | |
	atomic_inc(&fnic->in_flight);
481 | |
482 | /* |
483 | * Release host lock, use driver resource specific locks from here. |
484 | * Don't re-enable interrupts in case they were disabled prior to the |
485 | * caller disabling them. |
486 | */ |
	spin_unlock(lp->host->host_lock);
	fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
	fnic_priv(sc)->flags = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
495 | ret = SCSI_MLQUEUE_HOST_BUSY; |
496 | goto out; |
497 | } |
498 | memset(io_req, 0, sizeof(*io_req)); |
499 | |
500 | /* Map the data buffer */ |
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
		mempool_free(io_req, fnic->io_req_pool);
506 | goto out; |
507 | } |
508 | |
509 | /* Determine the type of scatter/gather list we need */ |
510 | io_req->sgl_cnt = sg_count; |
511 | io_req->sgl_type = FNIC_SGL_CACHE_DFLT; |
512 | if (sg_count > FNIC_DFLT_SG_DESC_CNT) |
513 | io_req->sgl_type = FNIC_SGL_CACHE_MAX; |
514 | |
515 | if (sg_count) { |
516 | io_req->sgl_list = |
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
524 | goto out; |
525 | } |
526 | |
527 | /* Cache sgl list allocated address before alignment */ |
528 | io_req->sgl_list_alloc = io_req->sgl_list; |
529 | ptr = (unsigned long) io_req->sgl_list; |
530 | if (ptr % FNIC_SG_DESC_ALIGN) { |
531 | io_req->sgl_list = (struct host_sg_desc *) |
532 | (((unsigned long) ptr |
533 | + FNIC_SG_DESC_ALIGN - 1) |
534 | & ~(FNIC_SG_DESC_ALIGN - 1)); |
535 | } |
536 | } |
537 | |
	/*
	 * Will acquire lock before setting to IO initialized.
	 */
541 | |
542 | io_lock = fnic_io_lock_hash(fnic, sc); |
543 | spin_lock_irqsave(io_lock, flags); |
544 | |
545 | /* initialize rest of io_req */ |
546 | io_lock_acquired = 1; |
547 | io_req->port_id = rport->port_id; |
548 | io_req->start_time = jiffies; |
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
	fnic_priv(sc)->io_req = io_req;
	fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
552 | |
553 | /* create copy wq desc and enqueue it */ |
554 | wq = &fnic->wq_copy[0]; |
555 | ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); |
556 | if (ret) { |
557 | /* |
558 | * In case another thread cancelled the request, |
559 | * refetch the pointer under the lock. |
560 | */ |
561 | FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, |
562 | tag, sc, 0, 0, 0, fnic_flags_and_state(sc)); |
		io_req = fnic_priv(sc)->io_req;
		fnic_priv(sc)->io_req = NULL;
		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
				     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
585 | } |
586 | out: |
587 | cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | |
588 | (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | |
589 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | |
590 | sc->cmnd[5]); |
591 | |
592 | FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, |
593 | tag, sc, io_req, sg_count, cmd_trace, |
594 | fnic_flags_and_state(sc)); |
595 | |
	/* the io lock is held only if we actually issued the IO */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
603 | return ret; |
604 | } |
605 | |
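/*
 * DEF_SCSI_QCMD() generates fnic_queuecommand(), which grabs the host lock
 * with interrupts disabled and then calls fnic_queuecommand_lck() above.
 */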
606 | DEF_SCSI_QCMD(fnic_queuecommand) |
607 | |
608 | /* |
609 | * fnic_fcpio_fw_reset_cmpl_handler |
610 | * Routine to handle fw reset completion |
611 | */ |
612 | static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, |
613 | struct fcpio_fw_req *desc) |
614 | { |
615 | u8 type; |
616 | u8 hdr_status; |
617 | struct fcpio_tag tag; |
618 | int ret = 0; |
619 | unsigned long flags; |
620 | struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; |
621 | |
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);
632 | |
633 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
634 | |
635 | /* fnic should be in FC_TRANS_ETH_MODE */ |
636 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { |
637 | /* Check status of reset completion */ |
638 | if (!hdr_status) { |
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
641 | /* Ready to send flogi out */ |
642 | fnic->state = FNIC_IN_ETH_MODE; |
643 | } else { |
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
648 | |
649 | /* |
650 | * Unable to change to eth mode, cannot send out flogi |
651 | * Change state to fc mode, so that subsequent Flogi |
652 | * requests from libFC will cause more attempts to |
653 | * reset the firmware. Free the cached flogi |
654 | */ |
655 | fnic->state = FNIC_IN_FC_MODE; |
			atomic64_inc(&reset_stats->fw_reset_failures);
657 | ret = -1; |
658 | } |
659 | } else { |
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing reset cmpl\n",
			      fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
665 | ret = -1; |
666 | } |
667 | |
668 | /* Thread removing device blocks till firmware reset is complete */ |
669 | if (fnic->remove_wait) |
670 | complete(fnic->remove_wait); |
671 | |
672 | /* |
673 | * If fnic is being removed, or fw reset failed |
674 | * free the flogi frame. Else, send it out |
675 | */ |
676 | if (fnic->remove_wait || ret) { |
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
679 | goto reset_cmpl_handler_end; |
680 | } |
681 | |
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
683 | |
684 | fnic_flush_tx(fnic); |
685 | |
686 | reset_cmpl_handler_end: |
687 | fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); |
688 | |
689 | return ret; |
690 | } |
691 | |
692 | /* |
693 | * fnic_fcpio_flogi_reg_cmpl_handler |
694 | * Routine to handle flogi register completion |
695 | */ |
696 | static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, |
697 | struct fcpio_fw_req *desc) |
698 | { |
699 | u8 type; |
700 | u8 hdr_status; |
701 | struct fcpio_tag tag; |
702 | int ret = 0; |
703 | unsigned long flags; |
704 | |
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
706 | |
707 | /* Update fnic state based on status of flogi reg completion */ |
708 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
709 | |
710 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { |
711 | |
712 | /* Check flogi registration completion status */ |
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg failed: %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
724 | } |
725 | } else { |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
730 | ret = -1; |
731 | } |
732 | |
733 | if (!ret) { |
734 | if (fnic->stop_rx_link_events) { |
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
744 | } |
745 | |
746 | reg_cmpl_handler_end: |
747 | return ret; |
748 | } |
749 | |
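/*
 * Check whether the request_out index reported by a firmware ACK still lies
 * inside the window of descriptors that have been posted but not yet
 * cleaned, accounting for ring wraparound; a stale index is rejected.
 */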
750 | static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, |
751 | u16 request_out) |
752 | { |
753 | if (wq->to_clean_index <= wq->to_use_index) { |
754 | /* out of range, stale request_out index */ |
755 | if (request_out < wq->to_clean_index || |
756 | request_out >= wq->to_use_index) |
757 | return 0; |
758 | } else { |
759 | /* out of range, stale request_out index */ |
760 | if (request_out < wq->to_clean_index && |
761 | request_out >= wq->to_use_index) |
762 | return 0; |
763 | } |
764 | /* request_out index is in range */ |
765 | return 1; |
766 | } |
767 | |
768 | |
769 | /* |
770 | * Mark that ack received and store the Ack index. If there are multiple |
771 | * acks received before Tx thread cleans it up, the latest value will be |
772 | * used which is correct behavior. This state should be in the copy Wq |
773 | * instead of in the fnic |
774 | */ |
775 | static inline void fnic_fcpio_ack_handler(struct fnic *fnic, |
776 | unsigned int cq_index, |
777 | struct fcpio_fw_req *desc) |
778 | { |
779 | struct vnic_wq_copy *wq; |
780 | u16 request_out = desc->u.ack.request_out; |
781 | unsigned long flags; |
782 | u64 *ox_id_tag = (u64 *)(void *)desc; |
783 | |
784 | /* mark the ack state */ |
785 | wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; |
786 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
787 | |
788 | fnic->fnic_stats.misc_stats.last_ack_time = jiffies; |
789 | if (is_ack_index_in_range(wq, request_out)) { |
790 | fnic->fw_ack_index[0] = request_out; |
791 | fnic->fw_ack_recd[0] = 1; |
792 | } else |
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
797 | FNIC_TRACE(fnic_fcpio_ack_handler, |
798 | fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], |
799 | ox_id_tag[4], ox_id_tag[5]); |
800 | } |
801 | |
802 | /* |
803 | * fnic_fcpio_icmnd_cmpl_handler |
804 | * Routine to handle icmnd completions |
805 | */ |
806 | static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, |
807 | struct fcpio_fw_req *desc) |
808 | { |
809 | u8 type; |
810 | u8 hdr_status; |
811 | struct fcpio_tag tag; |
812 | u32 id; |
813 | u64 xfer_len = 0; |
814 | struct fcpio_icmnd_cmpl *icmnd_cmpl; |
815 | struct fnic_io_req *io_req; |
816 | struct scsi_cmnd *sc; |
817 | struct fnic_stats *fnic_stats = &fnic->fnic_stats; |
818 | unsigned long flags; |
819 | spinlock_t *io_lock; |
820 | u64 cmd_trace; |
821 | unsigned long start_time; |
822 | unsigned long io_duration_time; |
823 | |
824 | /* Decode the cmpl description to get the io_req id */ |
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;
828 | |
829 | if (id >= fnic->fnic_max_tag_id) { |
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
833 | return; |
834 | } |
835 | |
	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl sc is null - "
			     "hdr status = %s tag = 0x%x desc = 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, desc);
844 | FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, |
845 | fnic->lport->host->host_no, id, |
846 | ((u64)icmnd_cmpl->_resvd0[1] << 16 | |
847 | (u64)icmnd_cmpl->_resvd0[0]), |
848 | ((u64)hdr_status << 16 | |
849 | (u64)icmnd_cmpl->scsi_status << 8 | |
850 | (u64)icmnd_cmpl->flags), desc, |
851 | (u64)icmnd_cmpl->residual, 0); |
852 | return; |
853 | } |
854 | |
855 | io_lock = fnic_io_lock_hash(fnic, sc); |
856 | spin_lock_irqsave(io_lock, flags); |
	io_req = fnic_priv(sc)->io_req;
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
867 | return; |
868 | } |
869 | start_time = io_req->start_time; |
870 | |
871 | /* firmware completed the io */ |
872 | io_req->io_completed = 1; |
873 | |
874 | /* |
875 | * if SCSI-ML has already issued abort on this command, |
876 | * set completion of the IO. The abts path will clean it up |
877 | */ |
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {

		/*
		 * set the FNIC_IO_DONE so that this doesn't get
		 * flagged as 'out of order' if it was not aborted
		 */
		fnic_priv(sc)->flags |= FNIC_IO_DONE;
		fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
		spin_unlock_irqrestore(io_lock, flags);
		if (FCPIO_ABORTED == hdr_status)
			fnic_priv(sc)->flags |= FNIC_IO_ABORTED;

		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "icmnd_cmpl abts pending "
			      "hdr status = %s tag = 0x%x sc = 0x%p "
			      "scsi_status = %x residual = %d\n",
			      fnic_fcpio_status_to_str(hdr_status),
			      id, sc,
			      icmnd_cmpl->scsi_status,
			      icmnd_cmpl->residual);
		return;
899 | } |
900 | |
901 | /* Mark the IO as complete */ |
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
903 | |
904 | icmnd_cmpl = &desc->u.icmnd_cmpl; |
905 | |
906 | switch (hdr_status) { |
907 | case FCPIO_SUCCESS: |
908 | sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; |
		xfer_len = scsi_bufflen(sc);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
			xfer_len -= icmnd_cmpl->residual;
			scsi_set_resid(sc, icmnd_cmpl->residual);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
			atomic64_inc(&fnic_stats->misc_stats.check_condition);

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
921 | break; |
922 | |
923 | case FCPIO_TIMEOUT: /* request was timed out */ |
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
925 | sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; |
926 | break; |
927 | |
928 | case FCPIO_ABORTED: /* request was aborted */ |
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
930 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
931 | break; |
932 | |
933 | case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */ |
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
936 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
937 | break; |
938 | |
939 | case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ |
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
941 | sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; |
942 | break; |
943 | |
944 | case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ |
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
946 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
947 | break; |
948 | |
949 | case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ |
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
951 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
952 | break; |
953 | |
	case FCPIO_FW_ERR: /* request was terminated due to fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
956 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
957 | break; |
958 | |
959 | case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ |
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
961 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
962 | break; |
963 | |
964 | case FCPIO_INVALID_HEADER: /* header contains invalid data */ |
965 | case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ |
966 | case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ |
967 | default: |
968 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; |
969 | break; |
970 | } |
971 | |
972 | /* Break link with the SCSI command */ |
	fnic_priv(sc)->io_req = NULL;
	fnic_priv(sc)->flags |= FNIC_IO_DONE;

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}
981 | |
982 | fnic_release_ioreq_buf(fnic, io_req, sc); |
983 | |
984 | cmd_trace = ((u64)hdr_status << 56) | |
985 | (u64)icmnd_cmpl->scsi_status << 48 | |
986 | (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | |
987 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | |
988 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]; |
989 | |
990 | FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, |
991 | sc->device->host->host_no, id, sc, |
992 | ((u64)icmnd_cmpl->_resvd0[1] << 56 | |
993 | (u64)icmnd_cmpl->_resvd0[0] << 48 | |
994 | jiffies_to_msecs(jiffies - start_time)), |
995 | desc, cmd_trace, fnic_flags_and_state(sc)); |
996 | |
997 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { |
998 | fnic->lport->host_stats.fcp_input_requests++; |
999 | fnic->fcp_input_bytes += xfer_len; |
1000 | } else if (sc->sc_data_direction == DMA_TO_DEVICE) { |
1001 | fnic->lport->host_stats.fcp_output_requests++; |
1002 | fnic->fcp_output_bytes += xfer_len; |
1003 | } else |
1004 | fnic->lport->host_stats.fcp_control_requests++; |
1005 | |
	/* Call SCSI completion function to complete the IO */
	scsi_done(sc);
	spin_unlock_irqrestore(io_lock, flags);

	mempool_free(io_req, fnic->io_req_pool);

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);
1017 | |
1018 | |
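	/*
	 * Bucket the command's total service time into the coarse latency
	 * histogram kept in io_stats and remember the worst case seen.
	 */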
	io_duration_time = jiffies_to_msecs(jiffies) -
				jiffies_to_msecs(start_time);

	if (io_duration_time <= 10)
		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
	else if (io_duration_time <= 100)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
	else if (io_duration_time <= 500)
		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
	else if (io_duration_time <= 5000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
	else if (io_duration_time <= 10000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
	else if (io_duration_time <= 30000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
	else {
		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

		if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
			atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
	}
1040 | } |
1041 | |
1042 | /* fnic_fcpio_itmf_cmpl_handler |
1043 | * Routine to handle itmf completions |
1044 | */ |
1045 | static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, |
1046 | struct fcpio_fw_req *desc) |
1047 | { |
1048 | u8 type; |
1049 | u8 hdr_status; |
1050 | struct fcpio_tag ftag; |
1051 | u32 id; |
1052 | struct scsi_cmnd *sc = NULL; |
1053 | struct fnic_io_req *io_req; |
1054 | struct fnic_stats *fnic_stats = &fnic->fnic_stats; |
1055 | struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; |
1056 | struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; |
1057 | struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; |
1058 | unsigned long flags; |
1059 | spinlock_t *io_lock; |
1060 | unsigned long start_time; |
1061 | unsigned int tag; |
1062 | |
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
	fcpio_tag_id_dec(&ftag, &id);
1065 | |
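	/*
	 * The low bits of the id hold the SCSI tag (FNIC_TAG_MASK); the high
	 * bits indicate whether this completion is for an abort
	 * (FNIC_TAG_ABORT) and/or a device reset (FNIC_TAG_DEV_RST).
	 */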
1066 | tag = id & FNIC_TAG_MASK; |
1067 | if (tag == fnic->fnic_max_tag_id) { |
1068 | if (!(id & FNIC_TAG_DEV_RST)) { |
1069 | shost_printk(KERN_ERR, fnic->lport->host, |
1070 | "Tag out of range id 0x%x hdr status = %s\n" , |
1071 | id, fnic_fcpio_status_to_str(hdr_status)); |
1072 | return; |
1073 | } |
1074 | } else if (tag > fnic->fnic_max_tag_id) { |
1075 | shost_printk(KERN_ERR, fnic->lport->host, |
1076 | "Tag out of range tag 0x%x hdr status = %s\n" , |
1077 | tag, fnic_fcpio_status_to_str(hdr_status)); |
1078 | return; |
1079 | } |
1080 | |
1081 | if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { |
1082 | sc = fnic->sgreset_sc; |
1083 | io_lock = &fnic->sgreset_lock; |
1084 | } else { |
		sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
		io_lock = fnic_io_lock_hash(fnic, sc);
1087 | } |
1088 | |
1089 | WARN_ON_ONCE(!sc); |
1090 | if (!sc) { |
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			     fnic_fcpio_status_to_str(hdr_status), tag);
1095 | return; |
1096 | } |
1097 | |
1098 | spin_lock_irqsave(io_lock, flags); |
	io_req = fnic_priv(sc)->io_req;
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), tag, sc);
1109 | return; |
1110 | } |
1111 | start_time = io_req->start_time; |
1112 | |
1113 | if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { |
1114 | /* Abort and terminate completion of device reset req */ |
1115 | /* REVISIT : Add asserts about various flags */ |
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
		fnic_priv(sc)->abts_status = hdr_status;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
1125 | } else if (id & FNIC_TAG_ABORT) { |
1126 | /* Completion of abort cmd */ |
1127 | switch (hdr_status) { |
1128 | case FCPIO_SUCCESS: |
1129 | break; |
1130 | case FCPIO_TIMEOUT: |
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
1136 | break; |
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "abort reject recd. id %d\n",
				      (int)(id & FNIC_TAG_MASK));
1141 | break; |
1142 | case FCPIO_IO_NOT_FOUND: |
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
1148 | break; |
1149 | default: |
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
1155 | break; |
1156 | } |
		if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
		fnic_priv(sc)->abts_status = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			fnic_priv(sc)->abts_status = FCPIO_SUCCESS;

		if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
1177 | |
1178 | /* |
1179 | * If scsi_eh thread is blocked waiting for abts to complete, |
1180 | * signal completion to it. IO will be cleaned in the thread |
1181 | * else clean it in this context |
1182 | */ |
1183 | if (io_req->abts_done) { |
1184 | complete(io_req->abts_done); |
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			fnic_priv(sc)->io_req = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
1196 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, |
1197 | sc->device->host->host_no, id, |
1198 | sc, |
1199 | jiffies_to_msecs(jiffies - start_time), |
1200 | desc, |
1201 | (((u64)hdr_status << 40) | |
1202 | (u64)sc->cmnd[0] << 32 | |
1203 | (u64)sc->cmnd[2] << 24 | |
1204 | (u64)sc->cmnd[3] << 16 | |
1205 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), |
1206 | fnic_flags_and_state(sc)); |
			scsi_done(sc);
			atomic64_dec(&fnic_stats->io_stats.active_ios);
			if (atomic64_read(&fnic->io_cmpl_skip))
				atomic64_dec(&fnic->io_cmpl_skip);
			else
				atomic64_inc(&fnic_stats->io_stats.io_completions);
1213 | } |
1214 | } else if (id & FNIC_TAG_DEV_RST) { |
1215 | /* Completion of device reset */ |
		fnic_priv(sc)->lr_status = hdr_status;
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
1220 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, |
1221 | sc->device->host->host_no, id, sc, |
1222 | jiffies_to_msecs(jiffies - start_time), |
1223 | desc, 0, fnic_flags_and_state(sc)); |
1224 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1225 | "Terminate pending " |
1226 | "dev reset cmpl recd. id %d status %s\n" , |
1227 | (int)(id & FNIC_TAG_MASK), |
1228 | fnic_fcpio_status_to_str(hdr_status)); |
1229 | return; |
1230 | } |
		if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
1234 | FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, |
1235 | sc->device->host->host_no, id, sc, |
1236 | jiffies_to_msecs(jiffies - start_time), |
1237 | desc, 0, fnic_flags_and_state(sc)); |
1238 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1239 | "dev reset cmpl recd after time out. " |
1240 | "id %d status %s\n" , |
1241 | (int)(id & FNIC_TAG_MASK), |
1242 | fnic_fcpio_status_to_str(hdr_status)); |
1243 | return; |
1244 | } |
		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);
1254 | |
1255 | } else { |
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
		spin_unlock_irqrestore(io_lock, flags);
1260 | } |
1261 | |
1262 | } |
1263 | |
1264 | /* |
1265 | * fnic_fcpio_cmpl_handler |
1266 | * Routine to service the cq for wq_copy |
1267 | */ |
1268 | static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, |
1269 | unsigned int cq_index, |
1270 | struct fcpio_fw_req *desc) |
1271 | { |
1272 | struct fnic *fnic = vnic_dev_priv(vdev); |
1273 | |
1274 | switch (desc->hdr.type) { |
1275 | case FCPIO_ICMND_CMPL: /* fw completed a command */ |
1276 | case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ |
1277 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ |
1278 | case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ |
1279 | case FCPIO_RESET_CMPL: /* fw completed reset */ |
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1281 | break; |
1282 | default: |
1283 | break; |
1284 | } |
1285 | |
1286 | switch (desc->hdr.type) { |
1287 | case FCPIO_ACK: /* fw copied copy wq desc to its queue */ |
1288 | fnic_fcpio_ack_handler(fnic, cq_index, desc); |
1289 | break; |
1290 | |
1291 | case FCPIO_ICMND_CMPL: /* fw completed a command */ |
1292 | fnic_fcpio_icmnd_cmpl_handler(fnic, desc); |
1293 | break; |
1294 | |
1295 | case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ |
1296 | fnic_fcpio_itmf_cmpl_handler(fnic, desc); |
1297 | break; |
1298 | |
1299 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ |
1300 | case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ |
1301 | fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); |
1302 | break; |
1303 | |
1304 | case FCPIO_RESET_CMPL: /* fw completed reset */ |
1305 | fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); |
1306 | break; |
1307 | |
1308 | default: |
1309 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1310 | "firmware completion type %d\n" , |
1311 | desc->hdr.type); |
1312 | break; |
1313 | } |
1314 | |
1315 | return 0; |
1316 | } |
1317 | |
1318 | /* |
1319 | * fnic_wq_copy_cmpl_handler |
1320 | * Routine to process wq copy |
1321 | */ |
1322 | int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) |
1323 | { |
1324 | unsigned int wq_work_done = 0; |
1325 | unsigned int i, cq_index; |
1326 | unsigned int cur_work_done; |
1327 | struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; |
1328 | u64 start_jiffies = 0; |
1329 | u64 end_jiffies = 0; |
1330 | u64 delta_jiffies = 0; |
1331 | u64 delta_ms = 0; |
1332 | |
1333 | for (i = 0; i < fnic->wq_copy_count; i++) { |
1334 | cq_index = i + fnic->raw_wq_count + fnic->rq_count; |
1335 | |
1336 | start_jiffies = jiffies; |
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
1340 | end_jiffies = jiffies; |
1341 | |
1342 | wq_work_done += cur_work_done; |
1343 | delta_jiffies = end_jiffies - start_jiffies; |
		if (delta_jiffies >
			(u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
			atomic64_set(&misc_stats->max_isr_jiffies,
					delta_jiffies);
			delta_ms = jiffies_to_msecs(delta_jiffies);
			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
			atomic64_set(&misc_stats->corr_work_done,
					cur_work_done);
		}
1353 | } |
1354 | return wq_work_done; |
1355 | } |
1356 | |
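/*
 * fnic_cleanup_io_iter
 * Per-command iterator used by fnic_cleanup_io() when outstanding IOs must
 * be flushed (e.g. after a firmware reset completes without per-IO
 * completions): it wakes any device-reset waiters, releases the io_req and
 * its DMA buffers, and finishes the command with DID_TRANSPORT_DISRUPTED.
 */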
1357 | static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) |
1358 | { |
	const int tag = scsi_cmd_to_rq(sc)->tag;
1360 | struct fnic *fnic = data; |
1361 | struct fnic_io_req *io_req; |
1362 | unsigned long flags = 0; |
1363 | spinlock_t *io_lock; |
1364 | unsigned long start_time = 0; |
1365 | struct fnic_stats *fnic_stats = &fnic->fnic_stats; |
1366 | |
1367 | io_lock = fnic_io_lock_tag(fnic, tag); |
1368 | spin_lock_irqsave(io_lock, flags); |
1369 | |
	io_req = fnic_priv(sc)->io_req;
	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
1373 | /* |
1374 | * We will be here only when FW completes reset |
1375 | * without sending completions for outstanding ios. |
1376 | */ |
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		if (io_req && io_req->dr_done)
			complete(io_req->dr_done);
		else if (io_req && io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto cleanup_scsi_cmd;
	}

	fnic_priv(sc)->io_req = NULL;

	spin_unlock_irqrestore(io_lock, flags);
1396 | |
1397 | /* |
1398 | * If there is a scsi_cmnd associated with this io_req, then |
1399 | * free the corresponding state |
1400 | */ |
1401 | start_time = io_req->start_time; |
1402 | fnic_release_ioreq_buf(fnic, io_req, sc); |
	mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
		      tag, sc, jiffies - start_time);

	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);
1415 | |
1416 | /* Complete the command to SCSI */ |
	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
			     tag, sc);
1421 | |
1422 | FNIC_TRACE(fnic_cleanup_io, |
1423 | sc->device->host->host_no, tag, sc, |
1424 | jiffies_to_msecs(jiffies - start_time), |
1425 | 0, ((u64)sc->cmnd[0] << 32 | |
1426 | (u64)sc->cmnd[2] << 24 | |
1427 | (u64)sc->cmnd[3] << 16 | |
1428 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), |
1429 | fnic_flags_and_state(sc)); |
1430 | |
	scsi_done(sc);
1432 | |
1433 | return true; |
1434 | } |
1435 | |
1436 | static void fnic_cleanup_io(struct fnic *fnic) |
1437 | { |
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_cleanup_io_iter, fnic);
1440 | } |
1441 | |
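/*
 * fnic_wq_copy_cleanup_handler
 * Cleanup callback for a copy WQ descriptor that the firmware will never
 * complete (interrupts are already disabled at this point): release the
 * io_req tied to the descriptor's tag and finish the command with
 * DID_NO_CONNECT.
 */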
1442 | void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, |
1443 | struct fcpio_host_req *desc) |
1444 | { |
1445 | u32 id; |
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
1447 | struct fnic_io_req *io_req; |
1448 | struct scsi_cmnd *sc; |
1449 | unsigned long flags; |
1450 | spinlock_t *io_lock; |
1451 | unsigned long start_time = 0; |
1452 | |
1453 | /* get the tag reference */ |
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
1455 | id &= FNIC_TAG_MASK; |
1456 | |
1457 | if (id >= fnic->fnic_max_tag_id) |
1458 | return; |
1459 | |
	sc = scsi_host_find_tag(fnic->lport->host, id);
1461 | if (!sc) |
1462 | return; |
1463 | |
1464 | io_lock = fnic_io_lock_hash(fnic, sc); |
1465 | spin_lock_irqsave(io_lock, flags); |
1466 | |
1467 | /* Get the IO context which this desc refers to */ |
	io_req = fnic_priv(sc)->io_req;
1469 | |
1470 | /* fnic interrupts are turned off by now */ |
1471 | |
1472 | if (!io_req) { |
		spin_unlock_irqrestore(io_lock, flags);
1474 | goto wq_copy_cleanup_scsi_cmd; |
1475 | } |
1476 | |
	fnic_priv(sc)->io_req = NULL;

	spin_unlock_irqrestore(io_lock, flags);
1480 | |
1481 | start_time = io_req->start_time; |
1482 | fnic_release_ioreq_buf(fnic, io_req, sc); |
	mempool_free(io_req, fnic->io_req_pool);
1484 | |
1485 | wq_copy_cleanup_scsi_cmd: |
1486 | sc->result = DID_NO_CONNECT << 16; |
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "wq_copy_cleanup_handler: DID_NO_CONNECT\n");
1489 | |
1490 | FNIC_TRACE(fnic_wq_copy_cleanup_handler, |
1491 | sc->device->host->host_no, id, sc, |
1492 | jiffies_to_msecs(jiffies - start_time), |
1493 | 0, ((u64)sc->cmnd[0] << 32 | |
1494 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | |
1495 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), |
1496 | fnic_flags_and_state(sc)); |
1497 | |
	scsi_done(sc);
1499 | } |
1500 | |
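/*
 * fnic_queue_abort_io_req
 * Queue an abort or terminate task-management request to the firmware for
 * the IO identified by tag. Returns 0 on success, 1 if the IO path is
 * blocked or no copy WQ descriptors are available.
 */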
1501 | static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, |
1502 | u32 task_req, u8 *fc_lun, |
1503 | struct fnic_io_req *io_req) |
1504 | { |
1505 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
1506 | struct Scsi_Host *host = fnic->lport->host; |
1507 | struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; |
1508 | unsigned long flags; |
1509 | |
	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);
1518 | |
1519 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); |
1520 | |
1521 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
1522 | free_wq_copy_descs(fnic, wq); |
1523 | |
	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);
1535 | |
1536 | atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); |
1537 | if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > |
1538 | atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) |
1539 | atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, |
1540 | atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); |
1541 |  |
1542 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); |
1543 | atomic_dec(&fnic->in_flight); |
1544 | |
1545 | return 0; |
1546 | } |
1547 | |
1548 | struct fnic_rport_abort_io_iter_data { |
1549 | struct fnic *fnic; |
1550 | u32 port_id; |
1551 | int term_cnt; |
1552 | }; |
1553 | |
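/*
 * fnic_rport_abort_io_iter
 * scsi_host_busy_iter() callback used by fnic_rport_exch_reset(). For each
 * command still pending in the firmware that belongs to the remote port
 * being reset, mark it FNIC_IOREQ_ABTS_PENDING and queue a terminate.
 */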
1554 | static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) |
1555 | { |
1556 | struct fnic_rport_abort_io_iter_data *iter_data = data; |
1557 | struct fnic *fnic = iter_data->fnic; |
1558 | int abt_tag = scsi_cmd_to_rq(sc)->tag; |
1559 | struct fnic_io_req *io_req; |
1560 | spinlock_t *io_lock; |
1561 | unsigned long flags; |
1562 | struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; |
1563 | struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; |
1564 | struct scsi_lun fc_lun; |
1565 | enum fnic_ioreq_state old_ioreq_state; |
1566 | |
1567 | io_lock = fnic_io_lock_tag(fnic, abt_tag); |
1568 | spin_lock_irqsave(io_lock, flags); |
1569 | |
1570 | io_req = fnic_priv(sc)->io_req; |
1571 |  |
1572 | if (!io_req || io_req->port_id != iter_data->port_id) { |
1573 | spin_unlock_irqrestore(io_lock, flags); |
1574 | return true; |
1575 | } |
1576 | |
1577 | if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && |
1578 | !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { |
1579 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1580 | "fnic_rport_exch_reset dev rst not pending sc 0x%p\n" , |
1581 | sc); |
1582 | spin_unlock_irqrestore(io_lock, flags); |
1583 | return true; |
1584 | } |
1585 | |
1586 | /* |
1587 | * Found IO that is still pending with firmware and |
1588 | * belongs to rport that went away |
1589 | */ |
1590 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { |
1591 | spin_unlock_irqrestore(io_lock, flags); |
1592 | return true; |
1593 | } |
1594 | if (io_req->abts_done) { |
1595 | shost_printk(KERN_ERR, fnic->lport->host, |
1596 | "fnic_rport_exch_reset: io_req->abts_done is set " |
1597 | "state is %s\n" , |
1598 | fnic_ioreq_state_to_str(fnic_priv(sc)->state)); |
1599 | } |
1600 | |
1601 | if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { |
1602 | shost_printk(KERN_ERR, fnic->lport->host, |
1603 | "rport_exch_reset " |
1604 | "IO not yet issued %p tag 0x%x flags " |
1605 | "%x state %d\n" , |
1606 | sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); |
1607 | } |
1608 | old_ioreq_state = fnic_priv(sc)->state; |
1609 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; |
1610 | fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; |
1611 | if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { |
1612 | atomic64_inc(&reset_stats->device_reset_terminates); |
1613 | abt_tag |= FNIC_TAG_DEV_RST; |
1614 | } |
1615 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1616 | "fnic_rport_exch_reset dev rst sc 0x%p\n" , sc); |
1617 | BUG_ON(io_req->abts_done); |
1618 | |
1619 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1620 | "fnic_rport_reset_exch: Issuing abts\n" ); |
1621 | |
1622 | spin_unlock_irqrestore(io_lock, flags); |
1623 | |
1624 | /* Now queue the abort command to firmware */ |
1625 | int_to_scsilun(sc->device->lun, &fc_lun); |
1626 | |
1627 | if (fnic_queue_abort_io_req(fnic, abt_tag, |
1628 | FCPIO_ITMF_ABT_TASK_TERM, |
1629 | fc_lun.scsi_lun, io_req)) { |
1630 | /* |
1631 | * Revert the cmd state back to old state, if |
1632 | * it hasn't changed in between. This cmd will get |
1633 | * aborted later by scsi_eh, or cleaned up during |
1634 | * lun reset |
1635 | */ |
1636 | spin_lock_irqsave(io_lock, flags); |
1637 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) |
1638 | fnic_priv(sc)->state = old_ioreq_state; |
1639 | spin_unlock_irqrestore(io_lock, flags); |
1640 | } else { |
1641 | spin_lock_irqsave(io_lock, flags); |
1642 | if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) |
1643 | fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; |
1644 | else |
1645 | fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; |
1646 | spin_unlock_irqrestore(io_lock, flags); |
1647 | atomic64_inc(&term_stats->terminates); |
1648 | iter_data->term_cnt++; |
1649 | } |
1650 | return true; |
1651 | } |
1652 | |
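/*
 * fnic_rport_exch_reset
 * Terminate all IOs still outstanding in the firmware for the given remote
 * port id and track the largest terminate burst seen in the statistics.
 */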
1653 | static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) |
1654 | { |
1655 | struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; |
1656 | struct fnic_rport_abort_io_iter_data iter_data = { |
1657 | .fnic = fnic, |
1658 | .port_id = port_id, |
1659 | .term_cnt = 0, |
1660 | }; |
1661 | |
1662 | FNIC_SCSI_DBG(KERN_DEBUG, |
1663 | fnic->lport->host, |
1664 | "fnic_rport_exch_reset called portid 0x%06x\n" , |
1665 | port_id); |
1666 | |
1667 | if (fnic->in_remove) |
1668 | return; |
1669 | |
1670 | scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, |
1671 | &iter_data); |
1672 | if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) |
1673 | atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); |
1674 | |
1675 | } |
1676 | |
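/*
 * fnic_terminate_rport_io
 * Called by the FC transport when a remote port is being torn down.
 * Validates the rport/lport pointers and terminates outstanding IOs on that
 * port via fnic_rport_exch_reset().
 */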
1677 | void fnic_terminate_rport_io(struct fc_rport *rport) |
1678 | { |
1679 | struct fc_rport_libfc_priv *rdata; |
1680 | struct fc_lport *lport; |
1681 | struct fnic *fnic; |
1682 | |
1683 | if (!rport) { |
1684 | printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n" ); |
1685 | return; |
1686 | } |
1687 | rdata = rport->dd_data; |
1688 | |
1689 | if (!rdata) { |
1690 | printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n" ); |
1691 | return; |
1692 | } |
1693 | lport = rdata->local_port; |
1694 | |
1695 | if (!lport) { |
1696 | printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n" ); |
1697 | return; |
1698 | } |
1699 | fnic = lport_priv(lport); |
1700 | FNIC_SCSI_DBG(KERN_DEBUG, |
1701 | fnic->lport->host, "fnic_terminate_rport_io called" |
1702 | " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n" , |
1703 | rport->port_name, rport->node_name, rport, |
1704 | rport->port_id); |
1705 | |
1706 | if (fnic->in_remove) |
1707 | return; |
1708 | |
1709 | fnic_rport_exch_reset(fnic, rport->port_id); |
1710 | } |
1711 | |
1712 | /* |
1713 | * This function is exported to SCSI for sending abort cmnds. |
1714 | * A SCSI IO is represented by a io_req in the driver. |
1715 | * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. |
1716 | */ |
1717 | int fnic_abort_cmd(struct scsi_cmnd *sc) |
1718 | { |
1719 | struct request *const rq = scsi_cmd_to_rq(sc); |
1720 | struct fc_lport *lp; |
1721 | struct fnic *fnic; |
1722 | struct fnic_io_req *io_req = NULL; |
1723 | struct fc_rport *rport; |
1724 | spinlock_t *io_lock; |
1725 | unsigned long flags; |
1726 | unsigned long start_time = 0; |
1727 | int ret = SUCCESS; |
1728 | u32 task_req = 0; |
1729 | struct scsi_lun fc_lun; |
1730 | struct fnic_stats *fnic_stats; |
1731 | struct abort_stats *abts_stats; |
1732 | struct terminate_stats *term_stats; |
1733 | enum fnic_ioreq_state old_ioreq_state; |
1734 | const int tag = rq->tag; |
1735 | unsigned long abt_issued_time; |
1736 | DECLARE_COMPLETION_ONSTACK(tm_done); |
1737 | |
1738 | /* Wait for rport to unblock */ |
1739 | fc_block_scsi_eh(sc); |
1740 |  |
1741 | /* Get local-port, check ready and link up */ |
1742 | lp = shost_priv(sc->device->host); |
1743 |  |
1744 | fnic = lport_priv(lp); |
1745 | fnic_stats = &fnic->fnic_stats; |
1746 | abts_stats = &fnic->fnic_stats.abts_stats; |
1747 | term_stats = &fnic->fnic_stats.term_stats; |
1748 | |
1749 | rport = starget_to_rport(scsi_target(sc->device)); |
1750 | FNIC_SCSI_DBG(KERN_DEBUG, |
1751 | fnic->lport->host, |
1752 | "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n" , |
1753 | rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags); |
1754 | |
1755 | fnic_priv(sc)->flags = FNIC_NO_FLAGS; |
1756 | |
1757 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) { |
1758 | ret = FAILED; |
1759 | goto fnic_abort_cmd_end; |
1760 | } |
1761 | |
1762 | /* |
1763 | * Avoid a race between SCSI issuing the abort and the device |
1764 | * completing the command. |
1765 | * |
1766 | * If the command is already completed by the fw cmpl code, |
1767 | * we just return SUCCESS from here. This means that the abort |
1768 | * succeeded. In the SCSI ML, since the timeout for command has |
1769 | * happened, the completion won't actually complete the command |
1770 | * and it will be considered as an aborted command |
1771 | * |
1772 | * .io_req will not be cleared except while holding io_req_lock. |
1773 | */ |
1774 | io_lock = fnic_io_lock_hash(fnic, sc); |
1775 | spin_lock_irqsave(io_lock, flags); |
1776 | io_req = fnic_priv(sc)->io_req; |
1777 | if (!io_req) { |
1778 | spin_unlock_irqrestore(io_lock, flags); |
1779 | goto fnic_abort_cmd_end; |
1780 | } |
1781 | |
1782 | io_req->abts_done = &tm_done; |
1783 | |
1784 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { |
1785 | spin_unlock_irqrestore(io_lock, flags); |
1786 | goto wait_pending; |
1787 | } |
1788 | |
1789 | abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); |
1790 | if (abt_issued_time <= 6000) |
1791 | atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec); |
1792 | else if (abt_issued_time > 6000 && abt_issued_time <= 20000) |
1793 | atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec); |
1794 | else if (abt_issued_time > 20000 && abt_issued_time <= 30000) |
1795 | atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec); |
1796 | else if (abt_issued_time > 30000 && abt_issued_time <= 40000) |
1797 | atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec); |
1798 | else if (abt_issued_time > 40000 && abt_issued_time <= 50000) |
1799 | atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec); |
1800 | else if (abt_issued_time > 50000 && abt_issued_time <= 60000) |
1801 | atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec); |
1802 | else |
1803 | atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); |
1804 | |
1805 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
1806 | "CBD Opcode: %02x Abort issued time: %lu msec\n" , sc->cmnd[0], abt_issued_time); |
1807 | /* |
1808 | * Command is still pending, need to abort it |
1809 | * If the firmware completes the command after this point, |
1810 | * the completion won't be done till mid-layer, since abort |
1811 | * has already started. |
1812 | */ |
1813 | old_ioreq_state = fnic_priv(sc)->state; |
1814 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; |
1815 | fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; |
1816 |  |
1817 | spin_unlock_irqrestore(io_lock, flags); |
1818 | |
1819 | /* |
1820 | * Check readiness of the remote port. If the path to remote |
1821 | * port is up, then send abts to the remote port to terminate |
1822 | * the IO. Else, just locally terminate the IO in the firmware |
1823 | */ |
1824 | if (fc_remote_port_chkready(rport) == 0) |
1825 | task_req = FCPIO_ITMF_ABT_TASK; |
1826 | else { |
1827 | atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); |
1828 | task_req = FCPIO_ITMF_ABT_TASK_TERM; |
1829 | } |
1830 | |
1831 | /* Now queue the abort command to firmware */ |
1832 | int_to_scsilun(sc->device->lun, &fc_lun); |
1833 | |
1834 | if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, |
1835 | io_req)) { |
1836 | spin_lock_irqsave(io_lock, flags); |
1837 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) |
1838 | fnic_priv(sc)->state = old_ioreq_state; |
1839 | io_req = fnic_priv(sc)->io_req; |
1840 | if (io_req) |
1841 | io_req->abts_done = NULL; |
1842 | spin_unlock_irqrestore(io_lock, flags); |
1843 | ret = FAILED; |
1844 | goto fnic_abort_cmd_end; |
1845 | } |
1846 | if (task_req == FCPIO_ITMF_ABT_TASK) { |
1847 | fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; |
1848 | atomic64_inc(&fnic_stats->abts_stats.aborts); |
1849 | } else { |
1850 | fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; |
1851 | atomic64_inc(&fnic_stats->term_stats.terminates); |
1852 | } |
1853 | |
1854 | /* |
1855 | * We queued an abort IO, wait for its completion. |
1856 | * Once the firmware completes the abort command, it will |
1857 | * wake up this thread. |
1858 | */ |
1859 | wait_pending: |
1860 | wait_for_completion_timeout(&tm_done, |
1861 | msecs_to_jiffies |
1862 | (2 * fnic->config.ra_tov + |
1863 | fnic->config.ed_tov)); |
1864 | |
1865 | /* Check the abort status */ |
1866 | spin_lock_irqsave(io_lock, flags); |
1867 | |
1868 | io_req = fnic_priv(sc)->io_req; |
1869 | if (!io_req) { |
1870 | atomic64_inc(&fnic_stats->io_stats.ioreq_null); |
1871 | spin_unlock_irqrestore(io_lock, flags); |
1872 | fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; |
1873 | ret = FAILED; |
1874 | goto fnic_abort_cmd_end; |
1875 | } |
1876 | io_req->abts_done = NULL; |
1877 | |
1878 | /* fw did not complete abort, timed out */ |
1879 | if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { |
1880 | spin_unlock_irqrestore(io_lock, flags); |
1881 | if (task_req == FCPIO_ITMF_ABT_TASK) { |
1882 | atomic64_inc(&abts_stats->abort_drv_timeouts); |
1883 | } else { |
1884 | atomic64_inc(&term_stats->terminate_drv_timeouts); |
1885 | } |
1886 | fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; |
1887 | ret = FAILED; |
1888 | goto fnic_abort_cmd_end; |
1889 | } |
1890 | |
1891 | /* IO out of order */ |
1892 | |
1893 | if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { |
1894 | spin_unlock_irqrestore(io_lock, flags); |
1895 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1896 | "Issuing Host reset due to out of order IO\n" ); |
1897 | |
1898 | ret = FAILED; |
1899 | goto fnic_abort_cmd_end; |
1900 | } |
1901 | |
1902 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; |
1903 | |
1904 | start_time = io_req->start_time; |
1905 | /* |
1906 | * firmware completed the abort, check the status, |
1907 | * free the io_req if successful. If abort fails, |
1908 | * Device reset will clean the I/O. |
1909 | */ |
1910 | if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) { |
1911 | fnic_priv(sc)->io_req = NULL; |
1912 | } else { |
1913 | ret = FAILED; |
1914 | spin_unlock_irqrestore(io_lock, flags); |
1915 | goto fnic_abort_cmd_end; |
1916 | } |
1917 | |
1918 | spin_unlock_irqrestore(io_lock, flags); |
1919 |  |
1920 | fnic_release_ioreq_buf(fnic, io_req, sc); |
1921 | mempool_free(io_req, fnic->io_req_pool); |
1922 | |
1923 | /* Call SCSI completion function to complete the IO */ |
1924 | sc->result = DID_ABORT << 16; |
1925 | scsi_done(sc); |
1926 | atomic64_dec(&fnic_stats->io_stats.active_ios); |
1927 | if (atomic64_read(&fnic->io_cmpl_skip)) |
1928 | atomic64_dec(&fnic->io_cmpl_skip); |
1929 | else |
1930 | atomic64_inc(&fnic_stats->io_stats.io_completions); |
1931 | |
1932 | fnic_abort_cmd_end: |
1933 | FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, |
1934 | jiffies_to_msecs(jiffies - start_time), |
1935 | 0, ((u64)sc->cmnd[0] << 32 | |
1936 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | |
1937 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), |
1938 | fnic_flags_and_state(sc)); |
1939 | |
1940 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1941 | "Returning from abort cmd type %x %s\n" , task_req, |
1942 | (ret == SUCCESS) ? |
1943 | "SUCCESS" : "FAILED" ); |
1944 | return ret; |
1945 | } |
1946 | |
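/*
 * fnic_queue_dr_io_req
 * Queue a LUN RESET task management request for the device behind @sc on
 * copy WQ 0. Returns 0 on success, FAILED if IOs are blocked, or -EAGAIN
 * if no WQ descriptor is available.
 */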
1947 | static inline int fnic_queue_dr_io_req(struct fnic *fnic, |
1948 | struct scsi_cmnd *sc, |
1949 | struct fnic_io_req *io_req) |
1950 | { |
1951 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; |
1952 | struct Scsi_Host *host = fnic->lport->host; |
1953 | struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; |
1954 | struct scsi_lun fc_lun; |
1955 | int ret = 0; |
1956 | unsigned long intr_flags; |
1957 | unsigned int tag = scsi_cmd_to_rq(sc)->tag; |
1958 | |
1959 | if (tag == SCSI_NO_TAG) |
1960 | tag = io_req->tag; |
1961 | |
1962 | spin_lock_irqsave(host->host_lock, intr_flags); |
1963 | if (unlikely(fnic_chk_state_flags_locked(fnic, |
1964 | FNIC_FLAGS_IO_BLOCKED))) { |
1965 | spin_unlock_irqrestore(host->host_lock, intr_flags); |
1966 | return FAILED; |
1967 | } else |
1968 | atomic_inc(&fnic->in_flight); |
1969 | spin_unlock_irqrestore(host->host_lock, intr_flags); |
1970 | |
1971 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); |
1972 | |
1973 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) |
1974 | free_wq_copy_descs(fnic, wq); |
1975 | |
1976 | if (!vnic_wq_copy_desc_avail(wq)) { |
1977 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
1978 | "queue_dr_io_req failure - no descriptors\n" ); |
1979 | atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); |
1980 | ret = -EAGAIN; |
1981 | goto lr_io_req_end; |
1982 | } |
1983 | |
1984 | /* fill in the lun info */ |
1985 | int_to_scsilun(sc->device->lun, &fc_lun); |
1986 | |
1987 | tag |= FNIC_TAG_DEV_RST; |
1988 | fnic_queue_wq_copy_desc_itmf(wq, tag, |
1989 | 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, |
1990 | fc_lun.scsi_lun, io_req->port_id, |
1991 | fnic->config.ra_tov, fnic->config.ed_tov); |
1992 | |
1993 | atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); |
1994 | if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > |
1995 | atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) |
1996 | atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, |
1997 | atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); |
1998 | |
1999 | lr_io_req_end: |
2000 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); |
2001 | atomic_dec(&fnic->in_flight); |
2002 | |
2003 | return ret; |
2004 | } |
2005 | |
2006 | struct fnic_pending_aborts_iter_data { |
2007 | struct fnic *fnic; |
2008 | struct scsi_cmnd *lr_sc; |
2009 | struct scsi_device *lun_dev; |
2010 | int ret; |
2011 | }; |
2012 | |
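/*
 * fnic_pending_aborts_iter
 * scsi_host_busy_iter() callback used by fnic_clean_pending_aborts(). For
 * each command (other than the LUN reset command itself) still pending on
 * the LUN being reset, queue a terminate, wait for it, and complete the
 * command with DID_RESET. Sets iter_data->ret to FAILED and stops the walk
 * if the terminate cannot be queued or does not complete.
 */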
2013 | static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) |
2014 | { |
2015 | struct fnic_pending_aborts_iter_data *iter_data = data; |
2016 | struct fnic *fnic = iter_data->fnic; |
2017 | struct scsi_device *lun_dev = iter_data->lun_dev; |
2018 | int abt_tag = scsi_cmd_to_rq(sc)->tag; |
2019 | struct fnic_io_req *io_req; |
2020 | spinlock_t *io_lock; |
2021 | unsigned long flags; |
2022 | struct scsi_lun fc_lun; |
2023 | DECLARE_COMPLETION_ONSTACK(tm_done); |
2024 | enum fnic_ioreq_state old_ioreq_state; |
2025 | |
2026 | if (sc == iter_data->lr_sc || sc->device != lun_dev) |
2027 | return true; |
2028 | |
2029 | io_lock = fnic_io_lock_tag(fnic, abt_tag); |
2030 | spin_lock_irqsave(io_lock, flags); |
2031 | io_req = fnic_priv(sc)->io_req; |
2032 | if (!io_req) { |
2033 | spin_unlock_irqrestore(io_lock, flags); |
2034 | return true; |
2035 | } |
2036 | |
2037 | /* |
2038 | * Found IO that is still pending with firmware and |
2039 | * belongs to the LUN that we are resetting |
2040 | */ |
2041 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2042 | "Found IO in %s on lun\n" , |
2043 | fnic_ioreq_state_to_str(fnic_priv(sc)->state)); |
2044 | |
2045 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { |
2046 | spin_unlock_irqrestore(io_lock, flags); |
2047 | return true; |
2048 | } |
2049 | if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && |
2050 | (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { |
2051 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
2052 | "%s dev rst not pending sc 0x%p\n" , __func__, |
2053 | sc); |
2054 | spin_unlock_irqrestore(io_lock, flags); |
2055 | return true; |
2056 | } |
2057 | |
2058 | if (io_req->abts_done) |
2059 | shost_printk(KERN_ERR, fnic->lport->host, |
2060 | "%s: io_req->abts_done is set state is %s\n" , |
2061 | __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); |
2062 | old_ioreq_state = fnic_priv(sc)->state; |
2063 | /* |
2064 | * Any pending IO issued prior to reset is expected to be |
2065 | * in abts pending state, if not we need to set |
2066 | * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. |
2067 | * When IO is completed, the IO will be handed over and |
2068 | * handled in this function. |
2069 | */ |
2070 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; |
2071 | |
2072 | BUG_ON(io_req->abts_done); |
2073 | |
2074 | if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { |
2075 | abt_tag |= FNIC_TAG_DEV_RST; |
2076 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
2077 | "%s: dev rst sc 0x%p\n" , __func__, sc); |
2078 | } |
2079 | |
2080 | fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; |
2081 | io_req->abts_done = &tm_done; |
2082 | spin_unlock_irqrestore(io_lock, flags); |
2083 | |
2084 | /* Now queue the abort command to firmware */ |
2085 | int_to_scsilun(sc->device->lun, &fc_lun); |
2086 | |
2087 | if (fnic_queue_abort_io_req(fnic, abt_tag, |
2088 | FCPIO_ITMF_ABT_TASK_TERM, |
2089 | fc_lun.scsi_lun, io_req)) { |
2090 | spin_lock_irqsave(io_lock, flags); |
2091 | io_req = fnic_priv(sc)->io_req; |
2092 | if (io_req) |
2093 | io_req->abts_done = NULL; |
2094 | if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) |
2095 | fnic_priv(sc)->state = old_ioreq_state; |
2096 | spin_unlock_irqrestore(io_lock, flags); |
2097 | iter_data->ret = FAILED; |
2098 | return false; |
2099 | } else { |
2100 | spin_lock_irqsave(io_lock, flags); |
2101 | if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) |
2102 | fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; |
2103 | spin_unlock_irqrestore(io_lock, flags); |
2104 | } |
2105 | fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; |
2106 | |
2107 | wait_for_completion_timeout(&tm_done, msecs_to_jiffies |
2108 | (fnic->config.ed_tov)); |
2109 | |
2110 | /* Recheck cmd state to check if it is now aborted */ |
2111 | spin_lock_irqsave(io_lock, flags); |
2112 | io_req = fnic_priv(sc)->io_req; |
2113 | if (!io_req) { |
2114 | spin_unlock_irqrestore(io_lock, flags); |
2115 | fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; |
2116 | return true; |
2117 | } |
2118 | |
2119 | io_req->abts_done = NULL; |
2120 | |
2121 | /* if abort is still pending with fw, fail */ |
2122 | if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { |
2123 | spin_unlock_irqrestore(io_lock, flags); |
2124 | fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; |
2125 | iter_data->ret = FAILED; |
2126 | return false; |
2127 | } |
2128 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; |
2129 | |
2130 | /* original sc used for lr is handled by dev reset code */ |
2131 | if (sc != iter_data->lr_sc) |
2132 | fnic_priv(sc)->io_req = NULL; |
2133 | spin_unlock_irqrestore(io_lock, flags); |
2134 | |
2135 | /* original sc used for lr is handled by dev reset code */ |
2136 | if (sc != iter_data->lr_sc) { |
2137 | fnic_release_ioreq_buf(fnic, io_req, sc); |
2138 | mempool_free(io_req, fnic->io_req_pool); |
2139 | } |
2140 | |
2141 | /* |
2142 | * Any IO returned during reset needs to call scsi_done |
2143 | * to hand the scsi_cmnd back to the upper layer. |
2144 | */ |
2145 | /* Set result to let upper SCSI layer retry */ |
2146 | sc->result = DID_RESET << 16; |
2147 | scsi_done(sc); |
2148 | |
2149 | return true; |
2150 | } |
2151 | |
2152 | /* |
2153 | * Clean up any pending aborts on the lun |
2154 | * For each outstanding IO on this lun, whose abort is not completed by fw, |
2155 | * issue a local abort. Wait for abort to complete. Return 0 if all commands |
2156 | * successfully aborted, 1 otherwise |
2157 | */ |
2158 | static int fnic_clean_pending_aborts(struct fnic *fnic, |
2159 | struct scsi_cmnd *lr_sc, |
2160 | bool new_sc) |
2161 | |
2162 | { |
2163 | int ret = 0; |
2164 | struct fnic_pending_aborts_iter_data iter_data = { |
2165 | .fnic = fnic, |
2166 | .lun_dev = lr_sc->device, |
2167 | .ret = SUCCESS, |
2168 | }; |
2169 | |
2170 | iter_data.lr_sc = lr_sc; |
2171 | |
2172 | scsi_host_busy_iter(fnic->lport->host, |
2173 | fnic_pending_aborts_iter, &iter_data); |
2174 | if (iter_data.ret == FAILED) { |
2175 | ret = iter_data.ret; |
2176 | goto clean_pending_aborts_end; |
2177 | } |
2178 | schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); |
2179 | |
2180 | /* walk again to check, if IOs are still pending in fw */ |
2181 | if (fnic_is_abts_pending(fnic, lr_sc)) |
2182 | ret = 1; |
2183 | |
2184 | clean_pending_aborts_end: |
2185 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
2186 | "%s: exit status: %d\n" , __func__, ret); |
2187 | return ret; |
2188 | } |
2189 | |
2190 | /* |
2191 | * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN |
2192 | * fail to get aborted. It calls driver's eh_device_reset with a SCSI command |
2193 | * on the LUN. |
2194 | */ |
2195 | int fnic_device_reset(struct scsi_cmnd *sc) |
2196 | { |
2197 | struct request *rq = scsi_cmd_to_rq(sc); |
2198 | struct fc_lport *lp; |
2199 | struct fnic *fnic; |
2200 | struct fnic_io_req *io_req = NULL; |
2201 | struct fc_rport *rport; |
2202 | int status; |
2203 | int ret = FAILED; |
2204 | spinlock_t *io_lock; |
2205 | unsigned long flags; |
2206 | unsigned long start_time = 0; |
2207 | struct scsi_lun fc_lun; |
2208 | struct fnic_stats *fnic_stats; |
2209 | struct reset_stats *reset_stats; |
2210 | int tag = rq->tag; |
2211 | DECLARE_COMPLETION_ONSTACK(tm_done); |
2212 | bool new_sc = 0; |
2213 | |
2214 | /* Wait for rport to unblock */ |
2215 | fc_block_scsi_eh(sc); |
2216 |  |
2217 | /* Get local-port, check ready and link up */ |
2218 | lp = shost_priv(sc->device->host); |
2219 |  |
2220 | fnic = lport_priv(lp); |
2221 | fnic_stats = &fnic->fnic_stats; |
2222 | reset_stats = &fnic->fnic_stats.reset_stats; |
2223 | |
2224 | atomic64_inc(&reset_stats->device_resets); |
2225 | |
2226 | rport = starget_to_rport(scsi_target(sc->device)); |
2227 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2228 | "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n" , |
2229 | rport->port_id, sc->device->lun, sc); |
2230 | |
2231 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
2232 | goto fnic_device_reset_end; |
2233 | |
2234 | /* Check if remote port up */ |
2235 | if (fc_remote_port_chkready(rport)) { |
2236 | atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); |
2237 | goto fnic_device_reset_end; |
2238 | } |
2239 | |
2240 | fnic_priv(sc)->flags = FNIC_DEVICE_RESET; |
2241 | |
2242 | if (unlikely(tag < 0)) { |
2243 | /* |
2244 | * For device reset issued through sg3utils, we let |
2245 | * only one LUN_RESET to go through and use a special |
2246 | * tag equal to max_tag_id so that we don't have to allocate |
2247 | * or free it. It won't interact with tags |
2248 | * allocated by mid layer. |
2249 | */ |
2250 | mutex_lock(&fnic->sgreset_mutex); |
2251 | tag = fnic->fnic_max_tag_id; |
2252 | new_sc = 1; |
2253 | fnic->sgreset_sc = sc; |
2254 | io_lock = &fnic->sgreset_lock; |
2255 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
2256 | "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n" , |
2257 | rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag); |
2258 | } else |
2259 | io_lock = fnic_io_lock_hash(fnic, sc); |
2260 | |
2261 | spin_lock_irqsave(io_lock, flags); |
2262 | io_req = fnic_priv(sc)->io_req; |
2263 | |
2264 | /* |
2265 | * If there is a io_req attached to this command, then use it, |
2266 | * else allocate a new one. |
2267 | */ |
2268 | if (!io_req) { |
2269 | io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); |
2270 | if (!io_req) { |
2271 | spin_unlock_irqrestore(io_lock, flags); |
2272 | goto fnic_device_reset_end; |
2273 | } |
2274 | memset(io_req, 0, sizeof(*io_req)); |
2275 | io_req->port_id = rport->port_id; |
2276 | io_req->tag = tag; |
2277 | io_req->sc = sc; |
2278 | fnic_priv(sc)->io_req = io_req; |
2279 | } |
2280 | io_req->dr_done = &tm_done; |
2281 | fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; |
2282 | fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; |
2283 | spin_unlock_irqrestore(io_lock, flags); |
2284 | |
2285 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n" , tag); |
2286 | |
2287 | /* |
2288 | * issue the device reset, if enqueue failed, clean up the ioreq |
2289 | * and break assoc with scsi cmd |
2290 | */ |
2291 | if (fnic_queue_dr_io_req(fnic, sc, io_req)) { |
2292 | spin_lock_irqsave(io_lock, flags); |
2293 | io_req = fnic_priv(sc)->io_req; |
2294 | if (io_req) |
2295 | io_req->dr_done = NULL; |
2296 | goto fnic_device_reset_clean; |
2297 | } |
2298 | spin_lock_irqsave(io_lock, flags); |
2299 | fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; |
2300 | spin_unlock_irqrestore(io_lock, flags); |
2301 | |
2302 | /* |
2303 | * Wait on the local completion for LUN reset. The io_req may be |
2304 | * freed while we wait since we hold no lock. |
2305 | */ |
2306 | wait_for_completion_timeout(&tm_done, |
2307 | msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); |
2308 | |
2309 | spin_lock_irqsave(io_lock, flags); |
2310 | io_req = fnic_priv(sc)->io_req; |
2311 | if (!io_req) { |
2312 | spin_unlock_irqrestore(io_lock, flags); |
2313 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2314 | "io_req is null tag 0x%x sc 0x%p\n" , tag, sc); |
2315 | goto fnic_device_reset_end; |
2316 | } |
2317 | io_req->dr_done = NULL; |
2318 | |
2319 | status = fnic_priv(sc)->lr_status; |
2320 | |
2321 | /* |
2322 | * If lun reset not completed, bail out with failed. io_req |
2323 | * gets cleaned up during higher levels of EH |
2324 | */ |
2325 | if (status == FCPIO_INVALID_CODE) { |
2326 | atomic64_inc(&reset_stats->device_reset_timeouts); |
2327 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2328 | "Device reset timed out\n" ); |
2329 | fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; |
2330 | spin_unlock_irqrestore(io_lock, flags); |
2331 | int_to_scsilun(sc->device->lun, &fc_lun); |
2332 | /* |
2333 | * Issue abort and terminate on device reset request. |
2334 | * If q'ing of terminate fails, retry it after a delay. |
2335 | */ |
2336 | while (1) { |
2337 | spin_lock_irqsave(io_lock, flags); |
2338 | if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { |
2339 | spin_unlock_irqrestore(io_lock, flags); |
2340 | break; |
2341 | } |
2342 | spin_unlock_irqrestore(io_lock, flags); |
2343 | if (fnic_queue_abort_io_req(fnic, |
2344 | tag | FNIC_TAG_DEV_RST, |
2345 | FCPIO_ITMF_ABT_TASK_TERM, |
2346 | fc_lun.scsi_lun, io_req)) { |
2347 | wait_for_completion_timeout(&tm_done, |
2348 | msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); |
2349 | } else { |
2350 | spin_lock_irqsave(io_lock, flags); |
2351 | fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; |
2352 | fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; |
2353 | io_req->abts_done = &tm_done; |
2354 | spin_unlock_irqrestore(io_lock, flags); |
2355 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2356 | "Abort and terminate issued on Device reset " |
2357 | "tag 0x%x sc 0x%p\n" , tag, sc); |
2358 | break; |
2359 | } |
2360 | } |
2361 | while (1) { |
2362 | spin_lock_irqsave(io_lock, flags); |
2363 | if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { |
2364 | spin_unlock_irqrestore(io_lock, flags); |
2365 | wait_for_completion_timeout(&tm_done, |
2366 | msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); |
2367 | break; |
2368 | } else { |
2369 | io_req = fnic_priv(sc)->io_req; |
2370 | io_req->abts_done = NULL; |
2371 | goto fnic_device_reset_clean; |
2372 | } |
2373 | } |
2374 | } else { |
2375 | spin_unlock_irqrestore(io_lock, flags); |
2376 | } |
2377 | |
2378 | /* Completed, but not successful, clean up the io_req, return fail */ |
2379 | if (status != FCPIO_SUCCESS) { |
2380 | spin_lock_irqsave(io_lock, flags); |
2381 | FNIC_SCSI_DBG(KERN_DEBUG, |
2382 | fnic->lport->host, |
2383 | "Device reset completed - failed\n" ); |
2384 | io_req = fnic_priv(sc)->io_req; |
2385 | goto fnic_device_reset_clean; |
2386 | } |
2387 | |
2388 | /* |
2389 | * Clean up any aborts on this lun that have still not |
2390 | * completed. If any of these fail, then LUN reset fails. |
2391 | * clean_pending_aborts cleans all cmds on this lun except |
2392 | * the lun reset cmd. If all cmds get cleaned, the lun reset |
2393 | * succeeds |
2394 | */ |
2395 | if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { |
2396 | spin_lock_irqsave(io_lock, flags); |
2397 | io_req = fnic_priv(sc)->io_req; |
2398 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2399 | "Device reset failed" |
2400 | " since could not abort all IOs\n" ); |
2401 | goto fnic_device_reset_clean; |
2402 | } |
2403 | |
2404 | /* Clean lun reset command */ |
2405 | spin_lock_irqsave(io_lock, flags); |
2406 | io_req = fnic_priv(sc)->io_req; |
2407 | if (io_req) |
2408 | /* Completed, and successful */ |
2409 | ret = SUCCESS; |
2410 | |
2411 | fnic_device_reset_clean: |
2412 | if (io_req) |
2413 | fnic_priv(sc)->io_req = NULL; |
2414 |  |
2415 | spin_unlock_irqrestore(io_lock, flags); |
2416 | |
2417 | if (io_req) { |
2418 | start_time = io_req->start_time; |
2419 | fnic_release_ioreq_buf(fnic, io_req, sc); |
2420 | mempool_free(io_req, fnic->io_req_pool); |
2421 | } |
2422 | |
2423 | fnic_device_reset_end: |
2424 | FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, |
2425 | jiffies_to_msecs(jiffies - start_time), |
2426 | 0, ((u64)sc->cmnd[0] << 32 | |
2427 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | |
2428 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), |
2429 | fnic_flags_and_state(sc)); |
2430 | |
2431 | if (new_sc) { |
2432 | fnic->sgreset_sc = NULL; |
2433 | mutex_unlock(&fnic->sgreset_mutex); |
2434 | } |
2435 | |
2436 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2437 | "Returning from device reset %s\n" , |
2438 | (ret == SUCCESS) ? |
2439 | "SUCCESS" : "FAILED" ); |
2440 | |
2441 | if (ret == FAILED) |
2442 | atomic64_inc(&reset_stats->device_reset_failures); |
2443 | |
2444 | return ret; |
2445 | } |
2446 | |
2447 | /* Clean up all IOs, clean up libFC local port */ |
2448 | int fnic_reset(struct Scsi_Host *shost) |
2449 | { |
2450 | struct fc_lport *lp; |
2451 | struct fnic *fnic; |
2452 | int ret = 0; |
2453 | struct reset_stats *reset_stats; |
2454 | |
2455 | lp = shost_priv(shost); |
2456 | fnic = lport_priv(lp); |
2457 | reset_stats = &fnic->fnic_stats.reset_stats; |
2458 | |
2459 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2460 | "fnic_reset called\n" ); |
2461 | |
2462 | atomic64_inc(&reset_stats->fnic_resets); |
2463 | |
2464 | /* |
2465 | * Reset local port, this will clean up libFC exchanges, |
2466 | * reset remote port sessions, and if link is up, begin flogi |
2467 | */ |
2468 | ret = fc_lport_reset(lp); |
2469 | |
2470 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2471 | "Returning from fnic reset %s\n" , |
2472 | (ret == 0) ? |
2473 | "SUCCESS" : "FAILED" ); |
2474 | |
2475 | if (ret == 0) |
2476 | atomic64_inc(&reset_stats->fnic_reset_completions); |
2477 | else |
2478 | atomic64_inc(&reset_stats->fnic_reset_failures); |
2479 | |
2480 | return ret; |
2481 | } |
2482 | |
2483 | /* |
2484 | * SCSI Error handling calls driver's eh_host_reset if all prior |
2485 | * error handling levels return FAILED. If host reset completes |
2486 | * successfully, and if link is up, then Fabric login begins. |
2487 | * |
2488 | * Host Reset is the highest level of error recovery. If this fails, then |
2489 | * host is offlined by SCSI. |
2490 | * |
2491 | */ |
2492 | int fnic_host_reset(struct scsi_cmnd *sc) |
2493 | { |
2494 | int ret; |
2495 | unsigned long wait_host_tmo; |
2496 | struct Scsi_Host *shost = sc->device->host; |
2497 | struct fc_lport *lp = shost_priv(shost); |
2498 | struct fnic *fnic = lport_priv(lp); |
2499 | unsigned long flags; |
2500 | |
2501 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2502 | if (!fnic->internal_reset_inprogress) { |
2503 | fnic->internal_reset_inprogress = true; |
2504 | } else { |
2505 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2506 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2507 | "host reset in progress skipping another host reset\n" ); |
2508 | return SUCCESS; |
2509 | } |
2510 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2511 | |
2512 | /* |
2513 | * If fnic_reset is successful, wait for fabric login to complete |
2514 | * scsi-ml tries to send a TUR to every device if host reset is |
2515 | * successful, so before returning to scsi, fabric should be up |
2516 | */ |
2517 | ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED; |
2518 | if (ret == SUCCESS) { |
2519 | wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; |
2520 | ret = FAILED; |
2521 | while (time_before(jiffies, wait_host_tmo)) { |
2522 | if ((lp->state == LPORT_ST_READY) && |
2523 | (lp->link_up)) { |
2524 | ret = SUCCESS; |
2525 | break; |
2526 | } |
2527 | ssleep(1); |
2528 | } |
2529 | } |
2530 | |
2531 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2532 | fnic->internal_reset_inprogress = false; |
2533 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2534 | return ret; |
2535 | } |
2536 | |
2537 | /* |
2538 | * This fxn is called from libFC when host is removed |
2539 | */ |
2540 | void fnic_scsi_abort_io(struct fc_lport *lp) |
2541 | { |
2542 | int err = 0; |
2543 | unsigned long flags; |
2544 | enum fnic_state old_state; |
2545 | struct fnic *fnic = lport_priv(lp); |
2546 | DECLARE_COMPLETION_ONSTACK(remove_wait); |
2547 | |
2548 | /* Issue firmware reset for fnic, wait for reset to complete */ |
2549 | retry_fw_reset: |
2550 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2551 | if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && |
2552 | fnic->link_events) { |
2553 | /* fw reset is in progress, poll for its completion */ |
2554 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2555 | schedule_timeout(msecs_to_jiffies(100)); |
2556 | goto retry_fw_reset; |
2557 | } |
2558 | |
2559 | fnic->remove_wait = &remove_wait; |
2560 | old_state = fnic->state; |
2561 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
2562 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); |
2563 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2564 | |
2565 | err = fnic_fw_reset_handler(fnic); |
2566 | if (err) { |
2567 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2568 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) |
2569 | fnic->state = old_state; |
2570 | fnic->remove_wait = NULL; |
2571 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2572 | return; |
2573 | } |
2574 | |
2575 | /* Wait for firmware reset to complete */ |
2576 | wait_for_completion_timeout(&remove_wait, |
2577 | msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); |
2578 | |
2579 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2580 | fnic->remove_wait = NULL; |
2581 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
2582 | "fnic_scsi_abort_io %s\n" , |
2583 | (fnic->state == FNIC_IN_ETH_MODE) ? |
2584 | "SUCCESS" : "FAILED" ); |
2585 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2586 | |
2587 | } |
2588 | |
2589 | /* |
2590 | * This fxn called from libFC to clean up driver IO state on link down |
2591 | */ |
2592 | void fnic_scsi_cleanup(struct fc_lport *lp) |
2593 | { |
2594 | unsigned long flags; |
2595 | enum fnic_state old_state; |
2596 | struct fnic *fnic = lport_priv(lp); |
2597 | |
2598 | /* issue fw reset */ |
2599 | retry_fw_reset: |
2600 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2601 | if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { |
2602 | /* fw reset is in progress, poll for its completion */ |
2603 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2604 | schedule_timeout(msecs_to_jiffies(100)); |
2605 | goto retry_fw_reset; |
2606 | } |
2607 | old_state = fnic->state; |
2608 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; |
2609 | fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); |
2610 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2611 | |
2612 | if (fnic_fw_reset_handler(fnic)) { |
2613 | spin_lock_irqsave(&fnic->fnic_lock, flags); |
2614 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) |
2615 | fnic->state = old_state; |
2616 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); |
2617 | } |
2618 | |
2619 | } |
2620 | |
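/* No-op cleanup hook; nothing to do here. */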
2621 | void fnic_empty_scsi_cleanup(struct fc_lport *lp) |
2622 | { |
2623 | } |
2624 | |
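/*
 * fnic_exch_mgr_reset
 * libFC exchange manager reset hook. A non-zero @did resets IOs for that
 * remote port; sid == 0 and did == 0 means link down or device removal, so
 * all driver IO state is cleaned up. libFC exchanges are reset last.
 */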
2625 | void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) |
2626 | { |
2627 | struct fnic *fnic = lport_priv(lp); |
2628 | |
2629 | /* Non-zero sid, nothing to do */ |
2630 | if (sid) |
2631 | goto call_fc_exch_mgr_reset; |
2632 | |
2633 | if (did) { |
2634 | fnic_rport_exch_reset(fnic, did); |
2635 | goto call_fc_exch_mgr_reset; |
2636 | } |
2637 | |
2638 | /* |
2639 | * sid = 0, did = 0 |
2640 | * link down or device being removed |
2641 | */ |
2642 | if (!fnic->in_remove) |
2643 | fnic_scsi_cleanup(lp); |
2644 | else |
2645 | fnic_scsi_abort_io(lp); |
2646 | |
2647 | /* call libFC exch mgr reset to reset its exchanges */ |
2648 | call_fc_exch_mgr_reset: |
2649 | fc_exch_mgr_reset(lp, sid, did); |
2650 | |
2651 | } |
2652 | |
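/*
 * fnic_abts_pending_iter
 * scsi_host_busy_iter() callback for fnic_is_abts_pending(). Stops the walk
 * and sets iter_data->ret to 1 when a command on the LUN (or host) being
 * checked is still in FNIC_IOREQ_ABTS_PENDING state.
 */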
2653 | static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) |
2654 | { |
2655 | struct fnic_pending_aborts_iter_data *iter_data = data; |
2656 | struct fnic *fnic = iter_data->fnic; |
2657 | int cmd_state; |
2658 | struct fnic_io_req *io_req; |
2659 | spinlock_t *io_lock; |
2660 | unsigned long flags; |
2661 | |
2662 | /* |
2663 | * ignore this lun reset cmd or cmds that do not belong to |
2664 | * this lun |
2665 | */ |
2666 | if (iter_data->lr_sc && sc == iter_data->lr_sc) |
2667 | return true; |
2668 | if (iter_data->lun_dev && sc->device != iter_data->lun_dev) |
2669 | return true; |
2670 | |
2671 | io_lock = fnic_io_lock_hash(fnic, sc); |
2672 | spin_lock_irqsave(io_lock, flags); |
2673 | |
2674 | io_req = fnic_priv(sc)->io_req; |
2675 | if (!io_req) { |
2676 | spin_unlock_irqrestore(io_lock, flags); |
2677 | return true; |
2678 | } |
2679 | |
2680 | /* |
2681 | * Found IO that is still pending with firmware and |
2682 | * belongs to the LUN that we are resetting |
2683 | */ |
2684 | FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, |
2685 | "Found IO in %s on lun\n" , |
2686 | fnic_ioreq_state_to_str(fnic_priv(sc)->state)); |
2687 | cmd_state = fnic_priv(sc)->state; |
2688 | spin_unlock_irqrestore(io_lock, flags); |
2689 | if (cmd_state == FNIC_IOREQ_ABTS_PENDING) |
2690 | iter_data->ret = 1; |
2691 | |
2692 | return iter_data->ret ? false : true; |
2693 | } |
2694 | |
2695 | /* |
2696 | * fnic_is_abts_pending() is a helper function that |
2697 | * walks through the tag map to check if any IOs are still pending; if so, |
2698 | * it returns 1 (true), otherwise 0 (false). |
2699 | * If @lr_sc is non-NULL, it checks IOs specific to that particular LUN; |
2700 | * otherwise, it checks all IOs. |
2701 | */ |
2702 | int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) |
2703 | { |
2704 | struct fnic_pending_aborts_iter_data iter_data = { |
2705 | .fnic = fnic, |
2706 | .lun_dev = NULL, |
2707 | .ret = 0, |
2708 | }; |
2709 | |
2710 | if (lr_sc) { |
2711 | iter_data.lun_dev = lr_sc->device; |
2712 | iter_data.lr_sc = lr_sc; |
2713 | } |
2714 | |
2715 | /* walk again to check, if IOs are still pending in fw */ |
2716 | scsi_host_busy_iter(fnic->lport->host, |
2717 | fnic_abts_pending_iter, &iter_data); |
2718 | |
2719 | return iter_data.ret; |
2720 | } |
2721 | |