// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
		 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
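
/*
 * ibmvfc_get_err_index() below matches a cmd_status entry when every bit
 * set in the entry's status field is also set in the reported status and
 * the error field matches exactly. For example, a reported status of
 * IBMVFC_FABRIC_MAPPED with an error of IBMVFC_XPORT_BUSY resolves to
 * DID_BUS_BUSY.
 */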

static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);

static const char *unknown_error = "unknown error";

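/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address:	vio device unit address
 * @ioba:	DMA address of the sub-CRQ message page
 * @length:	length of the sub-CRQ message page in bytes
 * @cookie:	returned handle identifying the registered sub-CRQ
 * @irq:	returned irq assigned to the sub-CRQ
 *
 * Return value:
 *	hcall return code
 **/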
static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
			  unsigned long length, unsigned long *cookie,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
	*cookie = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

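/**
 * ibmvfc_check_caps - Check the NPIV login response for a capability
 * @vhost:	ibmvfc host struct
 * @cap_flags:	capability bits to check for
 *
 * Return value:
 *	1 if the capability is present / 0 if not
 **/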
static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);

	return (host_caps & cap_flags) ? 1 : 0;
}

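/**
 * ibmvfc_get_fcp_iu - Get the FCP command IU for the negotiated version
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Returns the v2 command IU if the VIOS handles VF WWPNs, else the v1 IU.
 **/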
static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
						   struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.iu;
	else
		return &vfc_cmd->v1.iu;
}

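/**
 * ibmvfc_get_fcp_rsp - Get the FCP response for the negotiated version
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Returns the v2 response if the VIOS handles VF WWPNs, else the v1 response.
 **/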
static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
						 struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.rsp;
	else
		return &vfc_cmd->v1.rsp;
}

#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;
	}
}

#else
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	index into cmd_status / -EINVAL on failure
 **/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
		if ((cmd_status[i].status & status) == cmd_status[i].status &&
		    cmd_status[i].error == error)
			return i;

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	error description string
 **/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);
	if (rc >= 0)
		return cmd_status[rc].name;
	return unknown_error;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	1 if error should be retried / 0 if it should not
 **/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);

	if (rc >= 0)
		return cmd_status[rc].retry;
	return 1;
}

static const char *unknown_fc_explain = "unknown fc explain";

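/* Explanation codes reported with ELS rejects (LS_RJT reason explanations) */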
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

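/* Explanation codes reported with name server (CT) rejects */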
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_ls_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
		if (ls_explain[i].fc_explain == status)
			return ls_explain[i].name;

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_gs_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
		if (gs_explain[i].fc_explain == status)
			return gs_explain[i].name;

	return unknown_fc_explain;
}

static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status:	FC Type error status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_fc_type(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
		if (fc_type[i].fc_type == status)
			return fc_type[i].name;

	return unknown_fc_type;
}

/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:	ibmvfc target struct
 * @action:	action to perform
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				 enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost:	ibmvfc host struct
 * @state:	state to set host to
 *
 * Returns:
 *	0 if state changed / non-zero if not changed
 **/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				 enum ibmvfc_host_state state)
{
	int rc = 0;

	switch (vhost->state) {
	case IBMVFC_HOST_OFFLINE:
		rc = -EINVAL;
		break;
	default:
		vhost->state = state;
		break;
	}

	return rc;
}

/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:	ibmvfc host struct
 * @action:	action to perform
 *
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:	ibmvfc target struct
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
		tgt->init_retries = 0;
	}
	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (vhost->client_migrated)
				tgt->need_login = 1;
			else
				ibmvfc_del_tgt(tgt);
		}

		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost:	ibmvfc host struct
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

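/**
 * ibmvfc_send_sub_crq - Send a sub-CRQ message
 * @vhost:	ibmvfc host struct
 * @cookie:	cookie of the target sub-CRQ
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 * @word3:	the third 64 bits of the data
 * @word4:	the fourth 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/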
static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost:	ibmvfc host who owns the event pool
 * @queue:	ibmvfc queue struct
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	if (!queue->total_depth)
		return 0;

	pool->size = queue->total_depth;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	queue->evt_free = queue->evt_depth;
	queue->reserved_free = queue->reserved_depth;
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		/*
		 * evt->active states
		 * 1 = in flight
		 * 0 = being completed
		 * -1 = free/freed
		 */
		atomic_set(&evt->active, -1);
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost:	ibmvfc host who owns the event pool
 * @queue:	ibmvfc queue struct
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
 * ibmvfc_free_queue - Deallocate queue
 * @vhost:	ibmvfc host struct
 * @queue:	ibmvfc queue struct
 *
 * Unmaps dma and deallocates page for messages
 **/
static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue)
{
	struct device *dev = vhost->dev;

	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs.handle);
	queue->msgs.handle = NULL;

	ibmvfc_free_event_pool(vhost, queue);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	ibmvfc_free_queue(vhost, crq);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	return rc;
}

/**
 * ibmvfc_valid_event - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	ibmvfc event to be checked for validity
 *
 * Return value:
 *	1 if event is valid / 0 if event is not valid
 **/
static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
			      struct ibmvfc_event *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * ibmvfc_free_event - Free the specified event
 * @evt:	ibmvfc_event to be freed
 *
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
	unsigned long flags;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	BUG_ON(atomic_dec_and_test(&evt->active));

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->free);
	if (evt->reserved) {
		evt->reserved = 0;
		evt->queue->reserved_free++;
	} else {
		evt->queue->evt_free++;
	}
	if (evt->eh_comp)
		complete(evt->eh_comp);
	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}

/**
 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
 * @evt:	ibmvfc event struct
 *
 * This function does not setup any error status, that must be done
 * before this function gets called.
 **/
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
	struct scsi_cmnd *cmnd = evt->cmnd;

	if (cmnd) {
		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_complete_purge - Complete failed command list
 * @purge_list:	list head of failed commands
 *
 * This function runs completions on commands to fail as a result of a
 * host reset or platform migration.
 **/
static void ibmvfc_complete_purge(struct list_head *purge_list)
{
	struct ibmvfc_event *evt, *pos;

	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}

/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	/*
	 * Anything we are failing should still be active. Otherwise, it
	 * implies we already got a response for the command and are doing
	 * something bad like double completing it.
	 */
	BUG_ON(!atomic_dec_and_test(&evt->active));
	if (evt->cmnd) {
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	del_timer(&evt->timer);
}

/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost:		ibmvfc host struct
 * @error_code:	error code to fail requests with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
	struct ibmvfc_event *evt, *pos;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int hwqs = 0;
	int i;

	if (vhost->using_channels)
		hwqs = vhost->scsi_scrqs.active_queues;

	ibmvfc_dbg(vhost, "Purging all requests\n");
	spin_lock_irqsave(&vhost->crq.l_lock, flags);
	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
		ibmvfc_fail_request(evt, error_code);
	list_splice_init(&vhost->crq.sent, &vhost->purge);
	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

	for (i = 0; i < hwqs; i++) {
		spin_lock_irqsave(queues[i].q_lock, flags);
		spin_lock(&queues[i].l_lock);
		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
			ibmvfc_fail_request(evt, error_code);
		list_splice_init(&queues[i].sent, &vhost->purge);
		spin_unlock(&queues[i].l_lock);
		spin_unlock_irqrestore(queues[i].q_lock, flags);
	}
}

/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}

/**
 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
 * @vhost:	struct ibmvfc host to reset
 **/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
		vhost->job_step = ibmvfc_npiv_logout;
		wake_up(&vhost->work_wait_q);
	} else
		ibmvfc_hard_reset_host(vhost);
}

/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost:	ibmvfc host struct
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost:	ibmvfc host struct
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}

/**
 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue)
		if (tgt->target_id == starget->id) {
			kref_get(&tgt->kref);
			return tgt;
		}
	return NULL;
}

/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}

/**
 * ibmvfc_get_host_speed - Get host port speed
 * @shost:	scsi host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
		case 1:
			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
			break;
		case 2:
			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
			break;
		case 4:
			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
			break;
		case 8:
			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
			break;
		case 10:
			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
			break;
		case 16:
			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
			break;
		default:
			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
			break;
		}
	} else
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost:	scsi host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	switch (vhost->state) {
	case IBMVFC_INITIALIZING:
	case IBMVFC_ACTIVE:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case IBMVFC_LINK_DOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case IBMVFC_HALTED:
		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
		break;
	case IBMVFC_NO_CRQ:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	default:
		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
 * @rport:	rport struct
 * @timeout:	timeout value
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:	kref struct
 *
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}

/**
 * ibmvfc_get_starget_node_name - Get SCSI target's node name
 * @starget:	scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_name - Get SCSI target's port name
 * @starget:	scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
 * @starget:	scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
	long timeout = wait_event_timeout(vhost->init_wait_q,
					  ((vhost->state == IBMVFC_ACTIVE ||
					    vhost->state == IBMVFC_HOST_OFFLINE ||
					    vhost->state == IBMVFC_LINK_DEAD) &&
					   vhost->action == IBMVFC_HOST_ACTION_NONE),
					  (init_timeout * HZ));

	return timeout ? 0 : -EIO;
}

/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost:	scsi host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}

/**
 * ibmvfc_gather_partition_info - Gather info about the LPAR
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
	struct device_node *rootdn;
	const char *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return;

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		vhost->partition_number = *num;
	of_node_put(rootdn);
}

/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;
	u16 max_cmds;

	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
	if (mq_enabled)
		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
			vhost->scsi_scrqs.desired_queues;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	login_info->max_cmds = cpu_to_be32(max_cmds);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);

	if (vhost->mq_enabled || vhost->using_channels)
		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);

	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(async_crq->size *
					    sizeof(*async_crq->msgs.async));
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}

/**
 * __ibmvfc_get_event - Gets the next free event in pool
 * @queue:	ibmvfc queue struct
 * @reserved:	event is for a reserved management command
 *
 * Returns a free event from the pool.
 **/
static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
{
	struct ibmvfc_event *evt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	if (reserved && queue->reserved_free) {
		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
		evt->reserved = 1;
		queue->reserved_free--;
	} else if (queue->evt_free) {
		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
		queue->evt_free--;
	} else {
		goto out;
	}

	atomic_set(&evt->free, 0);
	list_del(&evt->queue_list);
out:
	spin_unlock_irqrestore(&queue->l_lock, flags);
	return evt;
}

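/*
 * Get a free event from a queue's pool. The reserved variant may consume
 * the depth held back for driver-internal management commands.
 */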
#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)

/**
 * ibmvfc_locked_done - Calls evt completion with host_lock held
 * @evt:	ibmvfc evt to complete
 *
 * All non-scsi command completion callbacks have the expectation that the
 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
 * MAD evt with the host_lock.
 **/
static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
	evt->_done(evt);
	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}

/**
 * ibmvfc_init_event - Initialize fields in an event struct that are always
 *				required.
 * @evt:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 **/
static void ibmvfc_init_event(struct ibmvfc_event *evt,
			      void (*done) (struct ibmvfc_event *), u8 format)
{
	evt->cmnd = NULL;
	evt->sync_iu = NULL;
	evt->eh_comp = NULL;
	evt->crq.format = format;
	if (format == IBMVFC_CMD_FORMAT)
		evt->done = done;
	else {
		evt->_done = done;
		evt->done = ibmvfc_locked_done;
	}
	evt->hwq = 0;
}

/**
 * ibmvfc_map_sg_list - Initialize scatterlist
 * @scmd:	scsi command struct
 * @nseg:	number of scatterlist segments
 * @md:	memory descriptor list to initialize
 **/
static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
			       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;

	scsi_for_each_sg(scmd, sg, nseg, i) {
		md[i].va = cpu_to_be64(sg_dma_address(sg));
		md[i].len = cpu_to_be32(sg_dma_len(sg));
		md[i].key = 0;
	}
}

/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd:	struct scsi_cmnd with the scatterlist
 * @evt:	ibmvfc event struct
 * @vfc_cmd:	vfc_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{
	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
		return 0;
	} else if (unlikely(sg_mapped < 0)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
		return sg_mapped;
	}

	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
		iu->add_cdb_len |= IBMVFC_WRDATA;
	} else {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
		iu->add_cdb_len |= IBMVFC_RDDATA;
	}

	if (sg_mapped == 1) {
		ibmvfc_map_sg_list(scmd, sg_mapped, data);
		return 0;
	}

	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);

	if (!evt->ext_list) {
		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
					       &evt->ext_list_token);

		if (!evt->ext_list) {
			scsi_dma_unmap(scmd);
			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
			return -ENOMEM;
		}
	}

	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

	data->va = cpu_to_be64(evt->ext_list_token);
	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
	data->key = 0;
	return 0;
}

/**
 * ibmvfc_timeout - Internal command timeout handler
 * @t:	struct ibmvfc_event that timed out
 *
 * Called when an internally generated command times out
 **/
static void ibmvfc_timeout(struct timer_list *t)
{
	struct ibmvfc_event *evt = from_timer(evt, t, timer);
	struct ibmvfc_host *vhost = evt->vhost;
	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
	ibmvfc_reset_host(vhost);
}

/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt:	event to be sent
 * @vhost:	ibmvfc host struct
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	unsigned long flags;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->sent);
	atomic_set(&evt->active, 1);

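	/*
	 * The barrier below orders the IU copy and the sent-list/active
	 * updates above ahead of the hcall that hands the request to
	 * firmware, so those stores are visible before the CRQ message
	 * is sent.
	 */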
1735 mb();
1736
1737 if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1738 rc = ibmvfc_send_sub_crq(vhost,
1739 cookie: evt->queue->vios_cookie,
1740 be64_to_cpu(crq_as_u64[0]),
1741 be64_to_cpu(crq_as_u64[1]),
1742 word3: 0, word4: 0);
1743 else
1744 rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1745 be64_to_cpu(crq_as_u64[1]));
1746
1747 if (rc) {
1748 atomic_set(v: &evt->active, i: 0);
1749 list_del(entry: &evt->queue_list);
1750 spin_unlock_irqrestore(lock: &evt->queue->l_lock, flags);
1751 del_timer(timer: &evt->timer);
1752
1753 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1754 * Firmware will send a CRQ with a transport event (0xFF) to
1755 * tell this client what has happened to the transport. This
1756 * will be handled in ibmvfc_handle_crq()
1757 */
1758 if (rc == H_CLOSED) {
1759 if (printk_ratelimit())
1760 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1761 if (evt->cmnd)
1762 scsi_dma_unmap(cmd: evt->cmnd);
1763 ibmvfc_free_event(evt);
1764 return SCSI_MLQUEUE_HOST_BUSY;
1765 }
1766
1767 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1768 if (evt->cmnd) {
1769 evt->cmnd->result = DID_ERROR << 16;
1770 evt->done = ibmvfc_scsi_eh_done;
1771 } else
1772 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1773
1774 evt->done(evt);
1775 } else {
1776 spin_unlock_irqrestore(lock: &evt->queue->l_lock, flags);
1777 ibmvfc_trc_start(evt);
1778 }
1779
1780 return 0;
1781}
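
/*
 * Illustrative note (not driver code): a CRQ entry is exactly 16 bytes,
 * so the send path above reinterprets &evt->crq as two big-endian 64-bit
 * words. A sketch of the equivalent framing:
 *
 *	__be64 words[2];
 *
 *	memcpy(words, &evt->crq, sizeof(words));
 *	ibmvfc_send_crq(vhost, be64_to_cpu(words[0]), be64_to_cpu(words[1]));
 *
 * The event pointer itself is stashed in the IU tag before sending, which
 * is what lets ibmvfc_handle_crq() correlate the response back to the
 * originating event.
 */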

/**
 * ibmvfc_log_error - Log an error for the failed command if appropriate
 * @evt:	ibmvfc event to log
 *
 **/
static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	const char *err = unknown_error;
	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	int logerr = 0;
	int rsp_code = 0;

	if (index >= 0) {
		logerr = cmd_status[index].log;
		err = cmd_status[index].name;
	}

	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
		return;

	if (rsp->flags & FCP_RSP_LEN_VALID)
		rsp_code = rsp->data.info.rsp_code;

	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}

/**
 * ibmvfc_relogin - Log back into the specified device
 * @sdev:	scsi device struct
 *
 **/
static void ibmvfc_relogin(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (rport == tgt->rport) {
			ibmvfc_del_tgt(tgt);
			break;
		}
	}

	ibmvfc_reinit_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt:	ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds.
 **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	u32 rsp_len = 0;
	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

	if (cmnd) {
		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);

			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
				ibmvfc_relogin(cmnd->device);

			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
				cmnd->result = (DID_ERROR << 16);

			ibmvfc_log_error(evt);
		}

		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_host_chkready - Check if the host can accept commands
 * @vhost:	 struct ibmvfc host
 *
 * Returns:
 *	0 if the host can accept commands / a SCSI result (DID_* << 16) if not
 **/
static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
{
	int result = 0;

	switch (vhost->state) {
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		result = DID_NO_CONNECT << 16;
		break;
	case IBMVFC_NO_CRQ:
	case IBMVFC_INITIALIZING:
	case IBMVFC_HALTED:
	case IBMVFC_LINK_DOWN:
		result = DID_REQUEUE << 16;
		break;
	case IBMVFC_ACTIVE:
		result = 0;
		break;
	}

	return result;
}
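
/*
 * Illustrative note (not driver code): callers treat a nonzero return from
 * ibmvfc_host_chkready() as a final SCSI result, e.g. as the queuecommand
 * path below does:
 *
 *	rc = ibmvfc_host_chkready(vhost);
 *	if (rc) {
 *		cmnd->result = rc;
 *		scsi_done(cmnd);
 *	}
 */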

static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	size_t offset;

	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
	} else
		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &iu->lun);

	return vfc_cmd;
}

/**
 * ibmvfc_queuecommand - The queuecommand function of the scsi template
 * @shost:	scsi host struct
 * @cmnd:	struct scsi_cmnd to be executed
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	struct ibmvfc_cmd *vfc_cmd;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_event *evt;
	u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
	u16 scsi_channel;
	int rc;

	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		cmnd->result = rc;
		scsi_done(cmnd);
		return 0;
	}

	cmnd->result = (DID_OK << 16);
	if (vhost->using_channels) {
		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
		if (!evt)
			return SCSI_MLQUEUE_HOST_BUSY;

		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
	} else {
		evt = ibmvfc_get_event(&vhost->crq);
		if (!evt)
			return SCSI_MLQUEUE_HOST_BUSY;
	}

	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;

	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);

	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

	if (cmnd->flags & SCMD_TAGGED) {
		vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
	}

	vfc_cmd->correlation = cpu_to_be64((u64)evt);

	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);

	ibmvfc_free_event(evt);
	if (rc == -ENOMEM)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
		scmd_printk(KERN_ERR, cmnd,
			    "Failed to map DMA buffer for command. rc=%d\n", rc);

	cmnd->result = DID_ERROR << 16;
	scsi_done(cmnd);
	return 0;
}

/**
 * ibmvfc_sync_completion - Signal that a synchronous command has completed
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
{
	/* copy the response back */
	if (evt->sync_iu)
		*evt->sync_iu = *evt->xfer_iu;

	complete(&evt->comp);
}
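
/*
 * Illustrative note (not driver code): ibmvfc_sync_completion is the done
 * callback used to run a MAD or TMF synchronously. The callers below all
 * follow the same sketch:
 *
 *	evt->sync_iu = &rsp_iu;			// where to copy the response IU
 *	init_completion(&evt->comp);
 *	rc = ibmvfc_send_event(evt, vhost, default_timeout);
 *	// ...drop host_lock...
 *	wait_for_completion(&evt->comp);
 *	// rsp_iu now holds the response; check rsp_iu.*.common.status
 */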

/**
 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
 * @evt:	struct ibmvfc_event
 *
 **/
static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;

	ibmvfc_free_event(evt);
	vhost->aborting_passthru = 0;
	dev_info(vhost->dev, "Passthru command cancelled\n");
}

/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job:	struct bsg_job that timed out
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return -ENOMEM;
	}

	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(port_id);
	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost:	struct ibmvfc_host to send command
 * @port_id:	port ID to send command
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		rc = -ENOMEM;
		goto unlock_out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(port_id);
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}

/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job:	struct bsg_job to be executed
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags, port_id = -1;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	unsigned int code = bsg_request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
			bsg_request->rqst_data.h_els.port_id[2];
		fallthrough;
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		issue_login = 1;
		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
			bsg_request->rqst_data.h_ct.port_id[2];
		fallthrough;
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	}

	if (port_id == -1)
		return -EINVAL;
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		rc = -ENOMEM;
		goto out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
	mad->iu.flags = cpu_to_be32(fc_flags);
	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);

	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
	mad->iu.scsi_id = cpu_to_be64(port_id);
	mad->iu.tag = cpu_to_be64((u64)evt);
	rsp_len = be32_to_cpu(mad->iu.rsp.len);

	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		bsg_reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}
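
/*
 * Illustrative note (not driver code): FC port IDs are 24-bit values
 * carried as three bytes in the bsg request, so the handler above
 * reassembles them as:
 *
 *	port_id = (b[0] << 16) | (b[1] << 8) | b[2];
 *
 * e.g. bytes {0x01, 0x02, 0x03} yield port_id 0x010203.
 */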

/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		if (vhost->using_channels)
			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
		else
			evt = ibmvfc_get_event(&vhost->crq);

		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}

		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = type;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}

/**
 * ibmvfc_match_rport - Match function for specified remote port
 * @evt:	ibmvfc event struct
 * @rport:	device to match
 *
 * Returns:
 *	1 if event matches rport / 0 if event does not match rport
 **/
static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
	struct fc_rport *cmd_rport;

	if (evt->cmnd) {
		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
		if (cmd_rport == rport)
			return 1;
	}
	return 0;
}

/**
 * ibmvfc_match_target - Match function for specified target
 * @evt:	ibmvfc event struct
 * @device:	device to match (starget)
 *
 * Returns:
 *	1 if event matches starget / 0 if event does not match starget
 **/
static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
{
	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
		return 1;
	return 0;
}

/**
 * ibmvfc_match_lun - Match function for specified LUN
 * @evt:	ibmvfc event struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if event matches sdev / 0 if event does not match sdev
 **/
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
{
	if (evt->cmnd && evt->cmnd->device == device)
		return 1;
	return 0;
}

/**
 * ibmvfc_event_is_free - Check if event is free or not
 * @evt:	ibmvfc event struct
 *
 * Returns:
 *	true / false
 **/
static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
{
	struct ibmvfc_event *loop_evt;

	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
		if (loop_evt == evt)
			return true;

	return false;
}

/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost:	ibmvfc host struct
 * @device:	device to match (starget or sdev)
 * @match:	match function
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
			       int (*match) (struct ibmvfc_event *, void *))
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait, i, q_index, q_size;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
	struct ibmvfc_queue *queues;

	ENTER;
	if (vhost->mq_enabled && vhost->using_channels) {
		queues = vhost->scsi_scrqs.scrqs;
		q_size = vhost->scsi_scrqs.active_queues;
	} else {
		queues = &vhost->crq;
		q_size = 1;
	}

	do {
		wait = 0;
		spin_lock_irqsave(vhost->host->host_lock, flags);
		for (q_index = 0; q_index < q_size; q_index++) {
			spin_lock(&queues[q_index].l_lock);
			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
				evt = &queues[q_index].evt_pool.events[i];
				if (!ibmvfc_event_is_free(evt)) {
					if (match(evt, device)) {
						evt->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock(&queues[q_index].l_lock);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;
				spin_lock_irqsave(vhost->host->host_lock, flags);
				for (q_index = 0; q_index < q_size; q_index++) {
					spin_lock(&queues[q_index].l_lock);
					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
						evt = &queues[q_index].evt_pool.events[i];
						if (!ibmvfc_event_is_free(evt)) {
							if (match(evt, device)) {
								evt->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock(&queues[q_index].l_lock);
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}
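
/*
 * Illustrative note (not driver code): the EH handlers below pair a cancel
 * with a wait, using the match functions above to pick out the affected
 * events, e.g.:
 *
 *	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
 *	if (!cancel_rc)
 *		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 */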

static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
					    struct scsi_device *sdev,
					    int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;

	evt = ibmvfc_get_reserved_event(queue);
	if (!evt)
		return NULL;
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(rport->port_name);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &tmf->lun);
	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
	if (vhost->state == IBMVFC_ACTIVE)
		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
	else
		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);

	init_completion(&evt->comp);

	return evt;
}
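
/*
 * Illustrative note (not driver code): when the host is not ACTIVE, the
 * flag computation above strips every TMF bit except SUPPRESS_ABTS. For
 * type = IBMVFC_TMF_ABORT_TASK_SET | IBMVFC_TMF_SUPPRESS_ABTS:
 *
 *	ACTIVE:     flags = type | IBMVFC_TMF_LUA_VALID
 *	otherwise:  flags = IBMVFC_TMF_SUPPRESS_ABTS | IBMVFC_TMF_LUA_VALID
 *
 * so only the "don't send ABTS" hint survives while the link is down.
 */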

static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt, *temp;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int num_hwq, i;
	int fail = 0;
	LIST_HEAD(cancelq);
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	num_hwq = vhost->scsi_scrqs.active_queues;
	for (i = 0; i < num_hwq; i++) {
		spin_lock(queues[i].q_lock);
		spin_lock(&queues[i].l_lock);
		found_evt = NULL;
		list_for_each_entry(evt, &queues[i].sent, queue_list) {
			if (evt->cmnd && evt->cmnd->device == sdev) {
				found_evt = evt;
				break;
			}
		}
		spin_unlock(&queues[i].l_lock);

		if (found_evt && vhost->logged_in) {
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			if (!evt) {
				spin_unlock(queues[i].q_lock);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				return -ENOMEM;
			}
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (list_empty(&cancelq)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
				/* Host adapter is most likely going through reset.
				 * Return success so the caller will wait for the
				 * commands being cancelled to get returned.
				 */
				break;
			default:
				fail = 1;
				break;
			}
		}
	}

	if (fail)
		return -EIO;

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	LEAVE;
	return 0;
}

static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	u16 status;

	ENTER;
	found_evt = NULL;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(&vhost->crq.l_lock);
	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}
	spin_unlock(&vhost->crq.l_lock);

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->logged_in) {
		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
		evt->sync_iu = &rsp;
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
		/* If a failure is received, the host adapter is most likely
		 * going through reset. Return success so the caller will wait
		 * for the commands being cancelled to get returned.
		 */
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	wait_for_completion(&evt->comp);
	status = be16_to_cpu(rsp.mad_common.status);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (status != IBMVFC_MAD_SUCCESS) {
		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
		switch (status) {
		case IBMVFC_MAD_DRIVER_FAILED:
		case IBMVFC_MAD_CRQ_ERROR:
			/* Host adapter is most likely going through reset.
			 * Return success so the caller will wait for the
			 * commands being cancelled to get returned.
			 */
			return 0;
		default:
			return -EIO;
		}
	}

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	return 0;
}

/**
 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
 * @sdev:	scsi device to cancel commands
 * @type:	type of error recovery being performed
 *
 * This sends a cancel to the VIOS for the specified device. This does
 * NOT send any abort to the actual device. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);

	if (vhost->mq_enabled && vhost->using_channels)
		return ibmvfc_cancel_all_mq(sdev, type);
	else
		return ibmvfc_cancel_all_sq(sdev, type);
}
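
/*
 * Illustrative note (not driver code): with sub-CRQ channels enabled, a
 * separate cancel TMF is issued on every hardware queue that has a command
 * outstanding for the device, since each sub-CRQ tracks its own sent list;
 * the single-queue variant only ever inspects &vhost->crq.
 */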

/**
 * ibmvfc_match_key - Match function for specified cancel key
 * @evt:	ibmvfc event struct
 * @key:	cancel key to match
 *
 * Returns:
 *	1 if event matches key / 0 if event does not match key
 **/
static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
	unsigned long cancel_key = (unsigned long)key;

	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
		return 1;
	return 0;
}

/**
 * ibmvfc_match_evt - Match function for specified event
 * @evt:	ibmvfc event struct
 * @match:	event to match
 *
 * Returns:
 *	1 if event matches / 0 if event does not match
 **/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
	if (evt == match)
		return 1;
	return 0;
}

/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev:	scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rc, rsp_rc = -EBUSY;
	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
	int rsp_code = 0;

	found_evt = NULL;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(&vhost->crq.l_lock);
	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}
	spin_unlock(&vhost->crq.l_lock);

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(&vhost->crq);
		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		evt->sync_iu = &rsp_iu;

		tmf->correlation = cpu_to_be64((u64)evt);

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	timeout = wait_for_completion_timeout(&evt->comp, timeout);

	if (!timeout) {
		rc = ibmvfc_cancel_all(sdev, 0);
		if (!rc) {
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
			if (rc == SUCCESS)
				rc = 0;
		}

		if (rc) {
			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
			ibmvfc_reset_host(vhost);
			rsp_rc = -EIO;
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);

			if (rc == SUCCESS)
				rsp_rc = 0;

			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
			if (rc != SUCCESS) {
				spin_lock_irqsave(vhost->host->host_lock, flags);
				ibmvfc_hard_reset_host(vhost);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				rsp_rc = 0;
			}

			goto out;
		}
	}

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

out:
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}

/**
 * ibmvfc_eh_abort_handler - Abort a command
 * @cmd:	scsi command to abort
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	int cancel_rc, block_rc;
	int rc = FAILED;

	ENTER;
	block_rc = fc_block_scsi_eh(cmd);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
		ibmvfc_abort_task_set(sdev);
	} else
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);

	if (!cancel_rc)
		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_eh_device_reset_handler - Reset a single LUN
 * @cmd:	scsi command struct
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	int cancel_rc, block_rc, reset_rc = 0;
	int rc = FAILED;

	ENTER;
	block_rc = fc_block_scsi_eh(cmd);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
	} else
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);

	if (!cancel_rc && !reset_rc)
		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
 * @sdev:	scsi device struct
 * @data:	return code
 *
 **/
static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
{
	unsigned long *rc = data;
	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}

/**
 * ibmvfc_eh_target_reset_handler - Reset the target
 * @cmd:	scsi command struct
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
	struct scsi_target *starget = scsi_target(cmd->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	int block_rc;
	int reset_rc = 0;
	int rc = FAILED;
	unsigned long cancel_rc = 0;
	bool tgt_reset = false;

	ENTER;
	block_rc = fc_block_rport(rport);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		struct scsi_device *sdev;

		shost_for_each_device(sdev, shost) {
			if ((sdev->channel != starget->channel) ||
			    (sdev->id != starget->id))
				continue;

			cancel_rc |= ibmvfc_cancel_all(sdev,
						       IBMVFC_TMF_TGT_RESET);
			if (!tgt_reset) {
				reset_rc = ibmvfc_reset_device(sdev,
					IBMVFC_TARGET_RESET, "target");
				tgt_reset = true;
			}
		}
	} else
		starget_for_each_device(starget, &cancel_rc,
					ibmvfc_dev_cancel_all_noreset);

	if (!cancel_rc && !reset_rc)
		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
 * @cmd:	struct scsi_cmnd having problems
 *
 **/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int rc;
	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);

	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
	rc = ibmvfc_issue_fc_host_lip(vhost->host);

	return rc ? FAILED : SUCCESS;
}

/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport:	rport struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	struct ibmvfc_target *tgt;
	unsigned long rc, flags;
	unsigned int found;

	ENTER;
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	found = 0;
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == rport->port_id) {
			found++;
			break;
		}
	}

	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
		/*
		 * If we get here, that means we previously attempted to send
		 * an implicit logout to the target but it failed, most likely
		 * due to I/O being pending, so we need to send it again
		 */
		ibmvfc_del_tgt(tgt);
		ibmvfc_reinit_host(vhost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	LEAVE;
}

static const struct ibmvfc_async_desc ae_desc[] = {
	{ "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
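
/*
 * Illustrative note (not driver code): the third column above is the
 * minimum adapter log level at which the event is reported (assuming the
 * ibmvfc_log() helper compares vhost->log_level against it), so the ELS
 * and per-port SCN events only show up once log_level is raised above the
 * default, while link and fabric level events are always logged.
 */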

static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};

/**
 * ibmvfc_get_ae_desc - Get text description for async event
 * @ae:	async event
 *
 **/
static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
		if (ae_desc[i].ae == ae)
			return &ae_desc[i];

	return &unknown_ae;
}

static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc[] = {
	{ IBMVFC_AE_LS_LINK_UP, " link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN, " link down" },
	{ IBMVFC_AE_LS_LINK_DEAD, " link dead" },
};

/**
 * ibmvfc_get_link_state - Get text description for link state
 * @state:	link state
 *
 **/
static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
		if (link_desc[i].state == state)
			return link_desc[i].desc;

	return "";
}

/**
 * ibmvfc_handle_async - Handle an async event from the adapter
 * @crq:	crq to process
 * @vhost:	ibmvfc host struct
 *
 **/
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
				struct ibmvfc_host *vhost)
{
	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
	struct ibmvfc_target *tgt;

	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
		   ibmvfc_get_link_state(crq->link_state));

	switch (be64_to_cpu(crq->event)) {
	case IBMVFC_AE_RESUME:
		switch (crq->link_state) {
		case IBMVFC_AE_LS_LINK_DOWN:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			break;
		case IBMVFC_AE_LS_LINK_DEAD:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			break;
		case IBMVFC_AE_LS_LINK_UP:
		case IBMVFC_AE_LS_LINK_BOUNCED:
		default:
			vhost->events_to_log |= IBMVFC_AE_LINKUP;
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
			break;
		}

		break;
	case IBMVFC_AE_LINK_UP:
		vhost->events_to_log |= IBMVFC_AE_LINKUP;
		vhost->delay_init = 1;
		__ibmvfc_reset_host(vhost);
		break;
	case IBMVFC_AE_SCN_FABRIC:
	case IBMVFC_AE_SCN_DOMAIN:
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		if (vhost->state < IBMVFC_HALTED) {
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
		}
		break;
	case IBMVFC_AE_SCN_NPORT:
	case IBMVFC_AE_SCN_GROUP:
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		ibmvfc_reinit_host(vhost);
		break;
	case IBMVFC_AE_ELS_LOGO:
	case IBMVFC_AE_ELS_PRLO:
	case IBMVFC_AE_ELS_PLOGI:
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
				break;
			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
				continue;
			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
				continue;
			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
				continue;
			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
				tgt->logo_rcvd = 1;
			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
				ibmvfc_del_tgt(tgt);
				ibmvfc_reinit_host(vhost);
			}
		}
		break;
	case IBMVFC_AE_LINK_DOWN:
	case IBMVFC_AE_ADAPTER_FAILED:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		break;
	case IBMVFC_AE_LINK_DEAD:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	case IBMVFC_AE_HALT:
		ibmvfc_link_down(vhost, IBMVFC_HALTED);
		break;
	default:
		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
		break;
	}
}

/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 * @evt_doneq:	Event done queue
 *
 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
			      struct list_head *evt_doneq)
{
	long rc;
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
			vhost->client_migrated = 1;

			scsi_block_requests(vhost->host);
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
			wake_up(&vhost->work_wait_q);
		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		} else {
			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_dec_if_positive(&evt->active))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	spin_lock(&evt->queue->l_lock);
	list_move_tail(&evt->queue_list, evt_doneq);
	spin_unlock(&evt->queue->l_lock);
}
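
/*
 * Illustrative note (not driver code): the tag stamped into each IU by
 * ibmvfc_send_event() comes back in crq->ioba, so the pointer recovered
 * above must be validated against the event pool before use.
 * atomic_dec_if_positive() then decrements evt->active only while it is
 * still positive and returns a negative value otherwise, so only the first
 * response for a given correlation token is processed; a second one trips
 * the duplicate check.
 */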

/**
 * ibmvfc_scan_finished - Check if the device scan is done.
 * @shost:	scsi host struct
 * @time:	current elapsed time
 *
 * Returns:
 *	0 if scan is not done / 1 if scan is done
 **/
static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	unsigned long flags;
	struct ibmvfc_host *vhost = shost_priv(shost);
	int done = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vhost->scan_timeout)
		done = 1;
	else if (time >= (vhost->scan_timeout * HZ)) {
		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
			 "continuing initialization\n", vhost->scan_timeout);
		done = 1;
	}

	if (vhost->scan_complete) {
		vhost->scan_timeout = init_timeout;
		done = 1;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
	return done;
}

/**
 * ibmvfc_slave_alloc - Setup the device's task set value
 * @sdev:	struct scsi_device device to configure
 *
 * Set the device's task set value so that error handling works as
 * expected.
 *
 * Returns:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ibmvfc_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
	spin_unlock_irqrestore(shost->host_lock, flags);
	return 0;
}

/**
 * ibmvfc_target_alloc - Setup the target's task set value
 * @starget:	struct scsi_target
 *
 * Set the target's task set value so that error handling works as
 * expected.
 *
 * Returns:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ibmvfc_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
	spin_unlock_irqrestore(shost->host_lock, flags);
	return 0;
}
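
/*
 * Illustrative note (not driver code): the task_set counter handed out in
 * slave_alloc/target_alloc above is what later becomes a command's
 * cancel_key (see ibmvfc_init_vfc_cmd() and ibmvfc_init_tmf()), and it is
 * the value ibmvfc_match_key() compares against when the EH path waits for
 * cancelled commands to drain.
 */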
3436
3437/**
3438 * ibmvfc_slave_configure - Configure the device
3439 * @sdev: struct scsi_device device to configure
3440 *
3441 * Enable allow_restart for a device if it is a disk. Adjust the
3442 * queue_depth here also.
3443 *
3444 * Returns:
3445 * 0
3446 **/
3447static int ibmvfc_slave_configure(struct scsi_device *sdev)
3448{
3449 struct Scsi_Host *shost = sdev->host;
3450 unsigned long flags = 0;
3451
3452 spin_lock_irqsave(shost->host_lock, flags);
3453 if (sdev->type == TYPE_DISK) {
3454 sdev->allow_restart = 1;
3455 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3456 }
3457 spin_unlock_irqrestore(lock: shost->host_lock, flags);
3458 return 0;
3459}
3460
3461/**
3462 * ibmvfc_change_queue_depth - Change the device's queue depth
3463 * @sdev: scsi device struct
3464 * @qdepth: depth to set
3465 *
3466 * Return value:
3467 * actual depth set
3468 **/
3469static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3470{
3471 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3472 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3473
3474 return scsi_change_queue_depth(sdev, qdepth);
3475}
3476
3477static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3478 struct device_attribute *attr, char *buf)
3479{
3480 struct Scsi_Host *shost = class_to_shost(dev);
3481 struct ibmvfc_host *vhost = shost_priv(shost);
3482
3483 return snprintf(buf, PAGE_SIZE, fmt: "%s\n",
3484 vhost->login_buf->resp.partition_name);
3485}
3486
3487static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3488 struct device_attribute *attr, char *buf)
3489{
3490 struct Scsi_Host *shost = class_to_shost(dev);
3491 struct ibmvfc_host *vhost = shost_priv(shost);
3492
3493 return snprintf(buf, PAGE_SIZE, fmt: "%s\n",
3494 vhost->login_buf->resp.device_name);
3495}
3496
3497static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3498 struct device_attribute *attr, char *buf)
3499{
3500 struct Scsi_Host *shost = class_to_shost(dev);
3501 struct ibmvfc_host *vhost = shost_priv(shost);
3502
3503 return snprintf(buf, PAGE_SIZE, fmt: "%s\n",
3504 vhost->login_buf->resp.port_loc_code);
3505}
3506
3507static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3508 struct device_attribute *attr, char *buf)
3509{
3510 struct Scsi_Host *shost = class_to_shost(dev);
3511 struct ibmvfc_host *vhost = shost_priv(shost);
3512
3513 return snprintf(buf, PAGE_SIZE, fmt: "%s\n",
3514 vhost->login_buf->resp.drc_name);
3515}
3516
3517static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3518 struct device_attribute *attr, char *buf)
3519{
3520 struct Scsi_Host *shost = class_to_shost(dev);
3521 struct ibmvfc_host *vhost = shost_priv(shost);
3522 return snprintf(buf, PAGE_SIZE, fmt: "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3523}
3524
3525static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3526 struct device_attribute *attr, char *buf)
3527{
3528 struct Scsi_Host *shost = class_to_shost(dev);
3529 struct ibmvfc_host *vhost = shost_priv(shost);
3530 return snprintf(buf, PAGE_SIZE, fmt: "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3531}
3532
3533/**
3534 * ibmvfc_show_log_level - Show the adapter's error logging level
3535 * @dev: class device struct
3536 * @attr: unused
3537 * @buf: buffer
3538 *
3539 * Return value:
3540 * number of bytes printed to buffer
3541 **/
3542static ssize_t ibmvfc_show_log_level(struct device *dev,
3543 struct device_attribute *attr, char *buf)
3544{
3545 struct Scsi_Host *shost = class_to_shost(dev);
3546 struct ibmvfc_host *vhost = shost_priv(shost);
3547 unsigned long flags = 0;
3548 int len;
3549
3550 spin_lock_irqsave(shost->host_lock, flags);
3551 len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3552 spin_unlock_irqrestore(shost->host_lock, flags);
3553 return len;
3554}
3555
3556/**
3557 * ibmvfc_store_log_level - Change the adapter's error logging level
3558 * @dev: class device struct
3559 * @attr: unused
3560 * @buf: buffer
3561 * @count: buffer size
3562 *
3563 * Return value:
3564 * number of bytes consumed from buffer
3565 **/
3566static ssize_t ibmvfc_store_log_level(struct device *dev,
3567 struct device_attribute *attr,
3568 const char *buf, size_t count)
3569{
3570 struct Scsi_Host *shost = class_to_shost(dev);
3571 struct ibmvfc_host *vhost = shost_priv(shost);
3572 unsigned long flags = 0;
3573
3574 spin_lock_irqsave(shost->host_lock, flags);
3575 vhost->log_level = simple_strtoul(buf, NULL, 10);
3576 spin_unlock_irqrestore(shost->host_lock, flags);
3577 return strlen(buf);
3578}
3579
3580static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3581 struct device_attribute *attr, char *buf)
3582{
3583 struct Scsi_Host *shost = class_to_shost(dev);
3584 struct ibmvfc_host *vhost = shost_priv(shost);
3585 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3586 unsigned long flags = 0;
3587 int len;
3588
3589 spin_lock_irqsave(shost->host_lock, flags);
3590 len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
3591 spin_unlock_irqrestore(shost->host_lock, flags);
3592 return len;
3593}
3594
3595static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3596 struct device_attribute *attr,
3597 const char *buf, size_t count)
3598{
3599 struct Scsi_Host *shost = class_to_shost(dev);
3600 struct ibmvfc_host *vhost = shost_priv(shost);
3601 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3602 unsigned long flags = 0;
3603 unsigned int channels;
3604
3605 spin_lock_irqsave(shost->host_lock, flags);
3606 channels = simple_strtoul(buf, NULL, 10);
3607 scsi->desired_queues = min(channels, shost->nr_hw_queues);
3608 ibmvfc_hard_reset_host(vhost);
3609 spin_unlock_irqrestore(shost->host_lock, flags);
3610 return strlen(buf);
3611}
3612
3613static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3614static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3615static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3616static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3617static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3618static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3619static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3620 ibmvfc_show_log_level, ibmvfc_store_log_level);
3621static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3622 ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3623
3624#ifdef CONFIG_SCSI_IBMVFC_TRACE
3625/**
3626 * ibmvfc_read_trace - Dump the adapter trace
3627 * @filp: open sysfs file
3628 * @kobj: kobject struct
3629 * @bin_attr: bin_attribute struct
3630 * @buf: buffer
3631 * @off: offset
3632 * @count: buffer size
3633 *
3634 * Return value:
3635 * number of bytes printed to buffer
3636 **/
3637static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3638 struct bin_attribute *bin_attr,
3639 char *buf, loff_t off, size_t count)
3640{
3641 struct device *dev = kobj_to_dev(kobj);
3642 struct Scsi_Host *shost = class_to_shost(dev);
3643 struct ibmvfc_host *vhost = shost_priv(shost);
3644 unsigned long flags = 0;
3645 int size = IBMVFC_TRACE_SIZE;
3646 char *src = (char *)vhost->trace;
3647
3648 if (off > size)
3649 return 0;
3650 if (off + count > size) {
3651 size -= off;
3652 count = size;
3653 }
3654
3655 spin_lock_irqsave(shost->host_lock, flags);
3656 memcpy(buf, &src[off], count);
3657 spin_unlock_irqrestore(shost->host_lock, flags);
3658 return count;
3659}
3660
3661static struct bin_attribute ibmvfc_trace_attr = {
3662 .attr = {
3663 .name = "trace",
3664 .mode = S_IRUGO,
3665 },
3666 .size = 0,
3667 .read = ibmvfc_read_trace,
3668};
3669#endif
3670
3671static struct attribute *ibmvfc_host_attrs[] = {
3672 &dev_attr_partition_name.attr,
3673 &dev_attr_device_name.attr,
3674 &dev_attr_port_loc_code.attr,
3675 &dev_attr_drc_name.attr,
3676 &dev_attr_npiv_version.attr,
3677 &dev_attr_capabilities.attr,
3678 &dev_attr_log_level.attr,
3679 &dev_attr_nr_scsi_channels.attr,
3680 NULL
3681};
3682
3683ATTRIBUTE_GROUPS(ibmvfc_host);
3684
3685static const struct scsi_host_template driver_template = {
3686 .module = THIS_MODULE,
3687 .name = "IBM POWER Virtual FC Adapter",
3688 .proc_name = IBMVFC_NAME,
3689 .queuecommand = ibmvfc_queuecommand,
3690 .eh_timed_out = fc_eh_timed_out,
3691 .eh_abort_handler = ibmvfc_eh_abort_handler,
3692 .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3693 .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3694 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3695 .slave_alloc = ibmvfc_slave_alloc,
3696 .slave_configure = ibmvfc_slave_configure,
3697 .target_alloc = ibmvfc_target_alloc,
3698 .scan_finished = ibmvfc_scan_finished,
3699 .change_queue_depth = ibmvfc_change_queue_depth,
3700 .cmd_per_lun = 16,
3701 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3702 .this_id = -1,
3703 .sg_tablesize = SG_ALL,
3704 .max_sectors = IBMVFC_MAX_SECTORS,
3705 .shost_groups = ibmvfc_host_groups,
3706 .track_queue_depth = 1,
3707};
3708
3709/**
3710 * ibmvfc_next_async_crq - Returns the next entry in async queue
3711 * @vhost: ibmvfc host struct
3712 *
3713 * Returns:
3714 * Pointer to next entry in queue / NULL if empty
3715 **/
3716static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3717{
3718 struct ibmvfc_queue *async_crq = &vhost->async_crq;
3719 struct ibmvfc_async_crq *crq;
3720
3721 crq = &async_crq->msgs.async[async_crq->cur];
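 /* The high bit of the valid byte marks an entry the firmware has posted to us */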
3722 if (crq->valid & 0x80) {
3723 if (++async_crq->cur == async_crq->size)
3724 async_crq->cur = 0;
3725 rmb();
3726 } else
3727 crq = NULL;
3728
3729 return crq;
3730}
3731
3732/**
3733 * ibmvfc_next_crq - Returns the next entry in message queue
3734 * @vhost: ibmvfc host struct
3735 *
3736 * Returns:
3737 * Pointer to next entry in queue / NULL if empty
3738 **/
3739static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3740{
3741 struct ibmvfc_queue *queue = &vhost->crq;
3742 struct ibmvfc_crq *crq;
3743
3744 crq = &queue->msgs.crq[queue->cur];
3745 if (crq->valid & 0x80) {
3746 if (++queue->cur == queue->size)
3747 queue->cur = 0;
3748 rmb();
3749 } else
3750 crq = NULL;
3751
3752 return crq;
3753}
3754
3755/**
3756 * ibmvfc_interrupt - Interrupt handler
3757 * @irq: number of irq to handle, not used
3758 * @dev_instance: ibmvfc_host that received interrupt
3759 *
3760 * Returns:
3761 * IRQ_HANDLED
3762 **/
3763static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3764{
3765 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3766 unsigned long flags;
3767
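 /* Mask further VIO interrupts and defer all CRQ processing to the tasklet */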
3768 spin_lock_irqsave(vhost->host->host_lock, flags);
3769 vio_disable_interrupts(to_vio_dev(vhost->dev));
3770 tasklet_schedule(&vhost->tasklet);
3771 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3772 return IRQ_HANDLED;
3773}
3774
3775/**
3776 * ibmvfc_tasklet - Interrupt handler tasklet
3777 * @data: ibmvfc host struct
3778 *
3779 * Returns:
3780 * Nothing
3781 **/
3782static void ibmvfc_tasklet(void *data)
3783{
3784 struct ibmvfc_host *vhost = data;
3785 struct vio_dev *vdev = to_vio_dev(vhost->dev);
3786 struct ibmvfc_crq *crq;
3787 struct ibmvfc_async_crq *async;
3788 struct ibmvfc_event *evt, *temp;
3789 unsigned long flags;
3790 int done = 0;
3791 LIST_HEAD(evt_doneq);
3792
3793 spin_lock_irqsave(vhost->host->host_lock, flags);
3794 spin_lock(vhost->crq.q_lock);
3795 while (!done) {
3796 /* Pull all the valid messages off the async CRQ */
3797 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3798 ibmvfc_handle_async(async, vhost);
3799 async->valid = 0;
3800 wmb();
3801 }
3802
3803 /* Pull all the valid messages off the CRQ */
3804 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3805 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3806 crq->valid = 0;
3807 wmb();
3808 }
3809
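 /*
 * Re-enable interrupts, then poll once more to close the window
 * where an entry arrives after the last poll but before the
 * interrupt is re-enabled.
 */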
3810 vio_enable_interrupts(vdev);
3811 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3812 vio_disable_interrupts(vdev);
3813 ibmvfc_handle_async(async, vhost);
3814 async->valid = 0;
3815 wmb();
3816 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3817 vio_disable_interrupts(vdev);
3818 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3819 crq->valid = 0;
3820 wmb();
3821 } else
3822 done = 1;
3823 }
3824
3825 spin_unlock(vhost->crq.q_lock);
3826 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3827
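 /* Run the deferred completions now that all locks are dropped */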
3828 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3829 del_timer(&evt->timer);
3830 list_del(&evt->queue_list);
3831 ibmvfc_trc_end(evt);
3832 evt->done(evt);
3833 }
3834}
3835
3836static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3837{
3838 struct device *dev = scrq->vhost->dev;
3839 struct vio_dev *vdev = to_vio_dev(dev);
3840 unsigned long rc;
3841 int irq_action = H_ENABLE_VIO_INTERRUPT;
3842
3843 if (!enable)
3844 irq_action = H_DISABLE_VIO_INTERRUPT;
3845
3846 rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3847 scrq->hw_irq, 0, 0);
3848
3849 if (rc)
3850 dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3851 enable ? "enable" : "disable", scrq->hwq_id, rc);
3852
3853 return rc;
3854}
3855
3856static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3857 struct list_head *evt_doneq)
3858{
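 /* The VIOS echoes our event pointer back in ioba as a correlation token */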
3859 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3860
3861 switch (crq->valid) {
3862 case IBMVFC_CRQ_CMD_RSP:
3863 break;
3864 case IBMVFC_CRQ_XPORT_EVENT:
3865 return;
3866 default:
3867 dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3868 return;
3869 }
3870
3871 /* The only kind of payload CRQs we should get are responses to
3872 * things we send. Make sure this response is to something we
3873 * actually sent
3874 */
3875 if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3876 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3877 crq->ioba);
3878 return;
3879 }
3880
3881 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3882 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3883 crq->ioba);
3884 return;
3885 }
3886
3887 spin_lock(&evt->queue->l_lock);
3888 list_move_tail(&evt->queue_list, evt_doneq);
3889 spin_unlock(&evt->queue->l_lock);
3890}
3891
3892static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3893{
3894 struct ibmvfc_crq *crq;
3895
3896 crq = &scrq->msgs.scrq[scrq->cur].crq;
3897 if (crq->valid & 0x80) {
3898 if (++scrq->cur == scrq->size)
3899 scrq->cur = 0;
3900 rmb();
3901 } else
3902 crq = NULL;
3903
3904 return crq;
3905}
3906
3907static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3908{
3909 struct ibmvfc_crq *crq;
3910 struct ibmvfc_event *evt, *temp;
3911 unsigned long flags;
3912 int done = 0;
3913 LIST_HEAD(evt_doneq);
3914
3915 spin_lock_irqsave(scrq->q_lock, flags);
3916 while (!done) {
3917 while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3918 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3919 crq->valid = 0;
3920 wmb();
3921 }
3922
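 /* Re-arm the sub-CRQ interrupt, then recheck for entries that raced in */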
3923 ibmvfc_toggle_scrq_irq(scrq, 1);
3924 if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3925 ibmvfc_toggle_scrq_irq(scrq, 0);
3926 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3927 crq->valid = 0;
3928 wmb();
3929 } else
3930 done = 1;
3931 }
3932 spin_unlock_irqrestore(scrq->q_lock, flags);
3933
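 /* Complete the drained events outside the queue lock */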
3934 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3935 del_timer(&evt->timer);
3936 list_del(&evt->queue_list);
3937 ibmvfc_trc_end(evt);
3938 evt->done(evt);
3939 }
3940}
3941
3942static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
3943{
3944 struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3945
3946 ibmvfc_toggle_scrq_irq(scrq, 0);
3947 ibmvfc_drain_sub_crq(scrq);
3948
3949 return IRQ_HANDLED;
3950}
3951
3952/**
3953 * ibmvfc_init_tgt - Set the next init job step for the target
3954 * @tgt: ibmvfc target struct
3955 * @job_step: job step to perform
3956 *
3957 **/
3958static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3959 void (*job_step) (struct ibmvfc_target *))
3960{
3961 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3962 tgt->job_step = job_step;
3963 wake_up(&tgt->vhost->work_wait_q);
3964}
3965
3966/**
3967 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3968 * @tgt: ibmvfc target struct
3969 * @job_step: initialization job step
3970 *
3971 * Returns: 1 if step will be retried / 0 if not
3972 *
3973 **/
3974static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3975 void (*job_step) (struct ibmvfc_target *))
3976{
3977 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3978 ibmvfc_del_tgt(tgt);
3979 wake_up(&tgt->vhost->work_wait_q);
3980 return 0;
3981 } else
3982 ibmvfc_init_tgt(tgt, job_step);
3983 return 1;
3984}
3985
3986/* Defined in FC-LS */
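/* Maps each PRLI accept response code to its retry and logged-in disposition */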
3987static const struct {
3988 int code;
3989 int retry;
3990 int logged_in;
3991} prli_rsp [] = {
3992 { 0, 1, 0 },
3993 { 1, 0, 1 },
3994 { 2, 1, 0 },
3995 { 3, 1, 0 },
3996 { 4, 0, 0 },
3997 { 5, 0, 0 },
3998 { 6, 0, 1 },
3999 { 7, 0, 0 },
4000 { 8, 1, 0 },
4001};
4002
4003/**
4004 * ibmvfc_get_prli_rsp - Find PRLI response index
4005 * @flags: PRLI response flags
4006 *
4007 **/
4008static int ibmvfc_get_prli_rsp(u16 flags)
4009{
4010 int i;
4011 int code = (flags & 0x0f00) >> 8;
4012
4013 for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
4014 if (prli_rsp[i].code == code)
4015 return i;
4016
4017 return 0;
4018}
4019
4020/**
4021 * ibmvfc_tgt_prli_done - Completion handler for Process Login
4022 * @evt: ibmvfc event struct
4023 *
4024 **/
4025static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
4026{
4027 struct ibmvfc_target *tgt = evt->tgt;
4028 struct ibmvfc_host *vhost = evt->vhost;
4029 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
4030 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
4031 u32 status = be16_to_cpu(rsp->common.status);
4032 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
4033
4034 vhost->discovery_threads--;
4035 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4036 switch (status) {
4037 case IBMVFC_MAD_SUCCESS:
4038 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
4039 parms->type, parms->flags, parms->service_parms);
4040
4041 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
4042 index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
4043 if (prli_rsp[index].logged_in) {
4044 if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
4045 tgt->need_login = 0;
4046 tgt->ids.roles = 0;
4047 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
4048 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4049 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
4050 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4051 tgt->add_rport = 1;
4052 } else
4053 ibmvfc_del_tgt(tgt);
4054 } else if (prli_rsp[index].retry)
4055 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4056 else
4057 ibmvfc_del_tgt(tgt);
4058 } else
4059 ibmvfc_del_tgt(tgt);
4060 break;
4061 case IBMVFC_MAD_DRIVER_FAILED:
4062 break;
4063 case IBMVFC_MAD_CRQ_ERROR:
4064 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4065 break;
4066 case IBMVFC_MAD_FAILED:
4067 default:
4068 if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
4069 be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
4070 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4071 else if (tgt->logo_rcvd)
4072 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4073 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4074 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4075 else
4076 ibmvfc_del_tgt(tgt);
4077
4078 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
4079 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4080 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
4081 break;
4082 }
4083
4084 kref_put(&tgt->kref, ibmvfc_release_tgt);
4085 ibmvfc_free_event(evt);
4086 wake_up(&vhost->work_wait_q);
4087}
4088
4089/**
4090 * ibmvfc_tgt_send_prli - Send a process login
4091 * @tgt: ibmvfc target struct
4092 *
4093 **/
4094static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4095{
4096 struct ibmvfc_process_login *prli;
4097 struct ibmvfc_host *vhost = tgt->vhost;
4098 struct ibmvfc_event *evt;
4099
4100 if (vhost->discovery_threads >= disc_threads)
4101 return;
4102
4103 kref_get(&tgt->kref);
4104 evt = ibmvfc_get_reserved_event(&vhost->crq);
4105 if (!evt) {
4106 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4107 kref_put(&tgt->kref, ibmvfc_release_tgt);
4108 __ibmvfc_reset_host(vhost);
4109 return;
4110 }
4111 vhost->discovery_threads++;
4112 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4113 evt->tgt = tgt;
4114 prli = &evt->iu.prli;
4115 memset(prli, 0, sizeof(*prli));
4116 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4117 prli->common.version = cpu_to_be32(2);
4118 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4119 } else {
4120 prli->common.version = cpu_to_be32(1);
4121 }
4122 prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4123 prli->common.length = cpu_to_be16(sizeof(*prli));
4124 prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4125
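 /* Ask to establish an FCP image pair as an initiator with XFER_RDY disabled for reads */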
4126 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4127 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4128 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4129 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4130
4131 if (cls3_error)
4132 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4133
4134 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4135 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4136 vhost->discovery_threads--;
4137 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4138 kref_put(&tgt->kref, ibmvfc_release_tgt);
4139 } else
4140 tgt_dbg(tgt, "Sent process login\n");
4141}
4142
4143/**
4144 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4145 * @evt: ibmvfc event struct
4146 *
4147 **/
4148static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4149{
4150 struct ibmvfc_target *tgt = evt->tgt;
4151 struct ibmvfc_host *vhost = evt->vhost;
4152 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4153 u32 status = be16_to_cpu(rsp->common.status);
4154 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4155
4156 vhost->discovery_threads--;
4157 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4158 switch (status) {
4159 case IBMVFC_MAD_SUCCESS:
4160 tgt_dbg(tgt, "Port Login succeeded\n");
4161 if (tgt->ids.port_name &&
4162 tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4163 vhost->reinit = 1;
4164 tgt_dbg(tgt, "Port re-init required\n");
4165 break;
4166 }
4167 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4168 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4169 tgt->ids.port_id = tgt->scsi_id;
4170 memcpy(&tgt->service_parms, &rsp->service_parms,
4171 sizeof(tgt->service_parms));
4172 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4173 sizeof(tgt->service_parms_change));
4174 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4175 break;
4176 case IBMVFC_MAD_DRIVER_FAILED:
4177 break;
4178 case IBMVFC_MAD_CRQ_ERROR:
4179 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4180 break;
4181 case IBMVFC_MAD_FAILED:
4182 default:
4183 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4184 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4185 else
4186 ibmvfc_del_tgt(tgt);
4187
4188 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4189 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4190 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4191 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4192 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4193 break;
4194 }
4195
4196 kref_put(&tgt->kref, ibmvfc_release_tgt);
4197 ibmvfc_free_event(evt);
4198 wake_up(&vhost->work_wait_q);
4199}
4200
4201/**
4202 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4203 * @tgt: ibmvfc target struct
4204 *
4205 **/
4206static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4207{
4208 struct ibmvfc_port_login *plogi;
4209 struct ibmvfc_host *vhost = tgt->vhost;
4210 struct ibmvfc_event *evt;
4211
4212 if (vhost->discovery_threads >= disc_threads)
4213 return;
4214
4215 kref_get(&tgt->kref);
4216 tgt->logo_rcvd = 0;
4217 evt = ibmvfc_get_reserved_event(&vhost->crq);
4218 if (!evt) {
4219 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4220 kref_put(&tgt->kref, ibmvfc_release_tgt);
4221 __ibmvfc_reset_host(vhost);
4222 return;
4223 }
4224 vhost->discovery_threads++;
4225 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4226 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4227 evt->tgt = tgt;
4228 plogi = &evt->iu.plogi;
4229 memset(plogi, 0, sizeof(*plogi));
4230 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4231 plogi->common.version = cpu_to_be32(2);
4232 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4233 } else {
4234 plogi->common.version = cpu_to_be32(1);
4235 }
4236 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4237 plogi->common.length = cpu_to_be16(sizeof(*plogi));
4238 plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4239
4240 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4241 vhost->discovery_threads--;
4242 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4243 kref_put(&tgt->kref, ibmvfc_release_tgt);
4244 } else
4245 tgt_dbg(tgt, "Sent port login\n");
4246}
4247
4248/**
4249 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4250 * @evt: ibmvfc event struct
4251 *
4252 **/
4253static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4254{
4255 struct ibmvfc_target *tgt = evt->tgt;
4256 struct ibmvfc_host *vhost = evt->vhost;
4257 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4258 u32 status = be16_to_cpu(rsp->common.status);
4259
4260 vhost->discovery_threads--;
4261 ibmvfc_free_event(evt);
4262 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4263
4264 switch (status) {
4265 case IBMVFC_MAD_SUCCESS:
4266 tgt_dbg(tgt, "Implicit Logout succeeded\n");
4267 break;
4268 case IBMVFC_MAD_DRIVER_FAILED:
4269 kref_put(&tgt->kref, ibmvfc_release_tgt);
4270 wake_up(&vhost->work_wait_q);
4271 return;
4272 case IBMVFC_MAD_FAILED:
4273 default:
4274 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4275 break;
4276 }
4277
4278 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4279 kref_put(&tgt->kref, ibmvfc_release_tgt);
4280 wake_up(&vhost->work_wait_q);
4281}
4282
4283/**
4284 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4285 * @tgt: ibmvfc target struct
4286 * @done: Routine to call when the event is responded to
4287 *
4288 * Returns:
4289 * Allocated and initialized ibmvfc_event struct
4290 **/
4291static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4292 void (*done) (struct ibmvfc_event *))
4293{
4294 struct ibmvfc_implicit_logout *mad;
4295 struct ibmvfc_host *vhost = tgt->vhost;
4296 struct ibmvfc_event *evt;
4297
4298 kref_get(&tgt->kref);
4299 evt = ibmvfc_get_reserved_event(&vhost->crq);
4300 if (!evt)
4301 return NULL;
4302 ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4303 evt->tgt = tgt;
4304 mad = &evt->iu.implicit_logout;
4305 memset(mad, 0, sizeof(*mad));
4306 mad->common.version = cpu_to_be32(1);
4307 mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4308 mad->common.length = cpu_to_be16(sizeof(*mad));
4309 mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4310 return evt;
4311}
4312
4313/**
4314 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4315 * @tgt: ibmvfc target struct
4316 *
4317 **/
4318static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4319{
4320 struct ibmvfc_host *vhost = tgt->vhost;
4321 struct ibmvfc_event *evt;
4322
4323 if (vhost->discovery_threads >= disc_threads)
4324 return;
4325
4326 vhost->discovery_threads++;
4327 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4328 ibmvfc_tgt_implicit_logout_done);
4329 if (!evt) {
4330 vhost->discovery_threads--;
4331 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4332 kref_put(&tgt->kref, ibmvfc_release_tgt);
4333 __ibmvfc_reset_host(vhost);
4334 return;
4335 }
4336
4337 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4338 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4339 vhost->discovery_threads--;
4340 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4341 kref_put(&tgt->kref, ibmvfc_release_tgt);
4342 } else
4343 tgt_dbg(tgt, "Sent Implicit Logout\n");
4344}
4345
4346/**
4347 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4348 * @evt: ibmvfc event struct
4349 *
4350 **/
4351static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4352{
4353 struct ibmvfc_target *tgt = evt->tgt;
4354 struct ibmvfc_host *vhost = evt->vhost;
4355 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4356 u32 status = be16_to_cpu(mad->common.status);
4357
4358 vhost->discovery_threads--;
4359 ibmvfc_free_event(evt);
4360
4361 /*
4362 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4363 * driver in which case we need to free up all the targets. If we are
4364 * not unloading, we will still go through a hard reset to get out of
4365 * offline state, so there is no need to track the old targets in that
4366 * case.
4367 */
4368 if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4369 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4370 else
4371 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4372
4373 tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4374 kref_put(&tgt->kref, ibmvfc_release_tgt);
4375 wake_up(&vhost->work_wait_q);
4376}
4377
4378/**
4379 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4380 * @tgt: ibmvfc target struct
4381 *
4382 **/
4383static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4384{
4385 struct ibmvfc_host *vhost = tgt->vhost;
4386 struct ibmvfc_event *evt;
4387
4388 if (!vhost->logged_in) {
4389 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4390 return;
4391 }
4392
4393 if (vhost->discovery_threads >= disc_threads)
4394 return;
4395
4396 vhost->discovery_threads++;
4397 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4398 ibmvfc_tgt_implicit_logout_and_del_done);
 if (!evt) {
 vhost->discovery_threads--;
 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 kref_put(&tgt->kref, ibmvfc_release_tgt);
 __ibmvfc_reset_host(vhost);
 return;
 }
4399
4400 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4401 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4402 vhost->discovery_threads--;
4403 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4404 kref_put(&tgt->kref, ibmvfc_release_tgt);
4405 } else
4406 tgt_dbg(tgt, "Sent Implicit Logout\n");
4407}
4408
4409/**
4410 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4411 * @evt: ibmvfc event struct
4412 *
4413 **/
4414static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4415{
4416 struct ibmvfc_target *tgt = evt->tgt;
4417 struct ibmvfc_host *vhost = evt->vhost;
4418 struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4419 u32 status = be16_to_cpu(rsp->common.status);
4420 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4421
4422 vhost->discovery_threads--;
4423 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4424 switch (status) {
4425 case IBMVFC_MAD_SUCCESS:
4426 tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
4427 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4428 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4429 tgt->scsi_id = tgt->new_scsi_id;
4430 tgt->ids.port_id = tgt->scsi_id;
4431 memcpy(&tgt->service_parms, &rsp->service_parms,
4432 sizeof(tgt->service_parms));
4433 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4434 sizeof(tgt->service_parms_change));
4435 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4436 break;
4437 case IBMVFC_MAD_DRIVER_FAILED:
4438 break;
4439 case IBMVFC_MAD_CRQ_ERROR:
4440 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4441 break;
4442 case IBMVFC_MAD_FAILED:
4443 default:
4444 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4445
4446 tgt_log(tgt, level,
4447 "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4448 tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4449 status);
4450 break;
4451 }
4452
4453 kref_put(&tgt->kref, ibmvfc_release_tgt);
4454 ibmvfc_free_event(evt);
4455 wake_up(&vhost->work_wait_q);
4456}
4457
4458
4459/**
4460 * ibmvfc_tgt_move_login - Initiate a move login for specified target
4461 * @tgt: ibmvfc target struct
4462 *
4463 **/
4464static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4465{
4466 struct ibmvfc_host *vhost = tgt->vhost;
4467 struct ibmvfc_move_login *move;
4468 struct ibmvfc_event *evt;
4469
4470 if (vhost->discovery_threads >= disc_threads)
4471 return;
4472
4473 kref_get(&tgt->kref);
4474 evt = ibmvfc_get_reserved_event(&vhost->crq);
4475 if (!evt) {
4476 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4477 kref_put(&tgt->kref, ibmvfc_release_tgt);
4478 __ibmvfc_reset_host(vhost);
4479 return;
4480 }
4481 vhost->discovery_threads++;
4482 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4483 ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4484 evt->tgt = tgt;
4485 move = &evt->iu.move_login;
4486 memset(move, 0, sizeof(*move));
4487 move->common.version = cpu_to_be32(1);
4488 move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4489 move->common.length = cpu_to_be16(sizeof(*move));
4490
4491 move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4492 move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4493 move->wwpn = cpu_to_be64(tgt->wwpn);
4494 move->node_name = cpu_to_be64(tgt->ids.node_name);
4495
4496 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4497 vhost->discovery_threads--;
4498 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4499 kref_put(&tgt->kref, ibmvfc_release_tgt);
4500 } else
4501 tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4502}
4503
4504/**
4505 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4506 * @mad: ibmvfc passthru mad struct
4507 * @tgt: ibmvfc target struct
4508 *
4509 * Returns:
4510 * 1 if PLOGI needed / 0 if PLOGI not needed
4511 **/
4512static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4513 struct ibmvfc_target *tgt)
4514{
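 /* ADISC accept: words 2-3 carry the WWPN, 4-5 the WWNN, word 6 the N_Port ID */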
4515 if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4516 return 1;
4517 if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4518 return 1;
4519 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4520 return 1;
4521 return 0;
4522}
4523
4524/**
4525 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4526 * @evt: ibmvfc event struct
4527 *
4528 **/
4529static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4530{
4531 struct ibmvfc_target *tgt = evt->tgt;
4532 struct ibmvfc_host *vhost = evt->vhost;
4533 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4534 u32 status = be16_to_cpu(mad->common.status);
4535 u8 fc_reason, fc_explain;
4536
4537 vhost->discovery_threads--;
4538 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4539 del_timer(&tgt->timer);
4540
4541 switch (status) {
4542 case IBMVFC_MAD_SUCCESS:
4543 tgt_dbg(tgt, "ADISC succeeded\n");
4544 if (ibmvfc_adisc_needs_plogi(mad, tgt))
4545 ibmvfc_del_tgt(tgt);
4546 break;
4547 case IBMVFC_MAD_DRIVER_FAILED:
4548 break;
4549 case IBMVFC_MAD_FAILED:
4550 default:
4551 ibmvfc_del_tgt(tgt);
4552 fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4553 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4554 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4555 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4556 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4557 ibmvfc_get_fc_type(fc_reason), fc_reason,
4558 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4559 break;
4560 }
4561
4562 kref_put(&tgt->kref, ibmvfc_release_tgt);
4563 ibmvfc_free_event(evt);
4564 wake_up(&vhost->work_wait_q);
4565}
4566
4567/**
4568 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4569 * @evt: ibmvfc event struct
4570 *
4571 **/
4572static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4573{
4574 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4575
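 /*
 * The FC IU is carried inline in the MAD; point the command and
 * response descriptors at their offsets within the event's own
 * DMA-mapped buffer.
 */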
4576 memset(mad, 0, sizeof(*mad));
4577 mad->common.version = cpu_to_be32(1);
4578 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4579 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4580 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4581 offsetof(struct ibmvfc_passthru_mad, iu));
4582 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4583 mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4584 mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4585 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4586 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4587 offsetof(struct ibmvfc_passthru_fc_iu, payload));
4588 mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4589 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4590 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4591 offsetof(struct ibmvfc_passthru_fc_iu, response));
4592 mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4593}
4594
4595/**
4596 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4597 * @evt: ibmvfc event struct
4598 *
4599 * Just cleanup this event struct. Everything else is handled by
4600 * the ADISC completion handler. If the ADISC never actually comes
4601 * back, we still have the timer running on the ADISC event struct
4602 * which will fire and cause the CRQ to get reset.
4603 *
4604 **/
4605static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4606{
4607 struct ibmvfc_host *vhost = evt->vhost;
4608 struct ibmvfc_target *tgt = evt->tgt;
4609
4610 tgt_dbg(tgt, "ADISC cancel complete\n");
4611 vhost->abort_threads--;
4612 ibmvfc_free_event(evt);
4613 kref_put(&tgt->kref, ibmvfc_release_tgt);
4614 wake_up(&vhost->work_wait_q);
4615}
4616
4617/**
4618 * ibmvfc_adisc_timeout - Handle an ADISC timeout
4619 * @t: ibmvfc target struct
4620 *
4621 * If an ADISC times out, send a cancel. If the cancel times
4622 * out, reset the CRQ. When the ADISC comes back as cancelled,
4623 * log back into the target.
4624 **/
4625static void ibmvfc_adisc_timeout(struct timer_list *t)
4626{
4627 struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4628 struct ibmvfc_host *vhost = tgt->vhost;
4629 struct ibmvfc_event *evt;
4630 struct ibmvfc_tmf *tmf;
4631 unsigned long flags;
4632 int rc;
4633
4634 tgt_dbg(tgt, "ADISC timeout\n");
4635 spin_lock_irqsave(vhost->host->host_lock, flags);
4636 if (vhost->abort_threads >= disc_threads ||
4637 tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4638 vhost->state != IBMVFC_INITIALIZING ||
4639 vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4640 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4641 return;
4642 }
4643
4644 vhost->abort_threads++;
4645 kref_get(&tgt->kref);
4646 evt = ibmvfc_get_reserved_event(&vhost->crq);
4647 if (!evt) {
4648 tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
4649 vhost->abort_threads--;
4650 kref_put(&tgt->kref, ibmvfc_release_tgt);
4651 __ibmvfc_reset_host(vhost);
4652 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4653 return;
4654 }
4655 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4656
4657 evt->tgt = tgt;
4658 tmf = &evt->iu.tmf;
4659 memset(tmf, 0, sizeof(*tmf));
4660 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4661 tmf->common.version = cpu_to_be32(2);
4662 tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4663 } else {
4664 tmf->common.version = cpu_to_be32(1);
4665 }
4666 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4667 tmf->common.length = cpu_to_be16(sizeof(*tmf));
4668 tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4669 tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4670
4671 rc = ibmvfc_send_event(evt, vhost, default_timeout);
4672
4673 if (rc) {
4674 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4675 vhost->abort_threads--;
4676 kref_put(&tgt->kref, ibmvfc_release_tgt);
4677 __ibmvfc_reset_host(vhost);
4678 } else
4679 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4680 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4681}
4682
4683/**
4684 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4685 * @tgt: ibmvfc target struct
4686 *
4687 * When sending an ADISC we end up with two timers running. The
4688 * first timer is the timer in the ibmvfc target struct. If this
4689 * fires, we send a cancel to the target. The second timer is the
4690 * timer on the ibmvfc event for the ADISC, which is longer. If that
4691 * fires, it means the ADISC timed out and our attempt to cancel it
4692 * also failed, so we need to reset the CRQ.
4693 **/
4694static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4695{
4696 struct ibmvfc_passthru_mad *mad;
4697 struct ibmvfc_host *vhost = tgt->vhost;
4698 struct ibmvfc_event *evt;
4699
4700 if (vhost->discovery_threads >= disc_threads)
4701 return;
4702
4703 kref_get(&tgt->kref);
4704 evt = ibmvfc_get_reserved_event(&vhost->crq);
4705 if (!evt) {
4706 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4707 kref_put(&tgt->kref, ibmvfc_release_tgt);
4708 __ibmvfc_reset_host(vhost);
4709 return;
4710 }
4711 vhost->discovery_threads++;
4712 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4713 evt->tgt = tgt;
4714
4715 ibmvfc_init_passthru(evt);
4716 mad = &evt->iu.passthru;
4717 mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4718 mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4719 mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4720
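 /* ADISC ELS payload: command code, then our WWPN, WWNN, and N_Port ID */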
4721 mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4722 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4723 sizeof(vhost->login_buf->resp.port_name));
4724 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4725 sizeof(vhost->login_buf->resp.node_name));
4726 mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4727
4728 if (timer_pending(&tgt->timer))
4729 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4730 else {
4731 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4732 add_timer(&tgt->timer);
4733 }
4734
4735 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4736 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4737 vhost->discovery_threads--;
4738 del_timer(&tgt->timer);
4739 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4740 kref_put(&tgt->kref, ibmvfc_release_tgt);
4741 } else
4742 tgt_dbg(tgt, "Sent ADISC\n");
4743}
4744
4745/**
4746 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4747 * @evt: ibmvfc event struct
4748 *
4749 **/
4750static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4751{
4752 struct ibmvfc_target *tgt = evt->tgt;
4753 struct ibmvfc_host *vhost = evt->vhost;
4754 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4755 u32 status = be16_to_cpu(rsp->common.status);
4756 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4757
4758 vhost->discovery_threads--;
4759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4760 switch (status) {
4761 case IBMVFC_MAD_SUCCESS:
4762 tgt_dbg(tgt, "Query Target succeeded\n");
4763 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4764 ibmvfc_del_tgt(tgt);
4765 else
4766 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4767 break;
4768 case IBMVFC_MAD_DRIVER_FAILED:
4769 break;
4770 case IBMVFC_MAD_CRQ_ERROR:
4771 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4772 break;
4773 case IBMVFC_MAD_FAILED:
4774 default:
4775 if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4776 be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4777 be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4778 ibmvfc_del_tgt(tgt);
4779 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4780 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4781 else
4782 ibmvfc_del_tgt(tgt);
4783
4784 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4785 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4786 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4787 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4788 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4789 status);
4790 break;
4791 }
4792
4793 kref_put(&tgt->kref, ibmvfc_release_tgt);
4794 ibmvfc_free_event(evt);
4795 wake_up(&vhost->work_wait_q);
4796}
4797
4798/**
4799 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4800 * @tgt: ibmvfc target struct
4801 *
4802 **/
4803static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4804{
4805 struct ibmvfc_query_tgt *query_tgt;
4806 struct ibmvfc_host *vhost = tgt->vhost;
4807 struct ibmvfc_event *evt;
4808
4809 if (vhost->discovery_threads >= disc_threads)
4810 return;
4811
4812 kref_get(&tgt->kref);
4813 evt = ibmvfc_get_reserved_event(&vhost->crq);
4814 if (!evt) {
4815 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4816 kref_put(&tgt->kref, ibmvfc_release_tgt);
4817 __ibmvfc_reset_host(vhost);
4818 return;
4819 }
4820 vhost->discovery_threads++;
4821 evt->tgt = tgt;
4822 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4823 query_tgt = &evt->iu.query_tgt;
4824 memset(query_tgt, 0, sizeof(*query_tgt));
4825 query_tgt->common.version = cpu_to_be32(1);
4826 query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4827 query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4828 query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4829
4830 ibmvfc_set_tgt_action(tgt, action: IBMVFC_TGT_ACTION_INIT_WAIT);
4831 if (ibmvfc_send_event(evt, vhost, timeout: default_timeout)) {
4832 vhost->discovery_threads--;
4833 ibmvfc_set_tgt_action(tgt, action: IBMVFC_TGT_ACTION_NONE);
4834 kref_put(kref: &tgt->kref, release: ibmvfc_release_tgt);
4835 } else
4836 tgt_dbg(tgt, "Sent Query Target\n");
4837}
4838
4839/**
4840 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4841 * @vhost: ibmvfc host struct
4842 * @target: Holds the SCSI ID to allocate the target for and its WWPN
4843 *
4844 * Returns:
4845 * 0 on success / other on failure
4846 **/
4847static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4848 struct ibmvfc_discover_targets_entry *target)
4849{
4850 struct ibmvfc_target *stgt = NULL;
4851 struct ibmvfc_target *wtgt = NULL;
4852 struct ibmvfc_target *tgt;
4853 unsigned long flags;
4854 u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4855 u64 wwpn = be64_to_cpu(target->wwpn);
4856
4857 /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4858 spin_lock_irqsave(vhost->host->host_lock, flags);
4859 list_for_each_entry(tgt, &vhost->targets, queue) {
4860 if (tgt->wwpn == wwpn) {
4861 wtgt = tgt;
4862 break;
4863 }
4864 }
4865
4866 list_for_each_entry(tgt, &vhost->targets, queue) {
4867 if (tgt->scsi_id == scsi_id) {
4868 stgt = tgt;
4869 break;
4870 }
4871 }
4872
4873 if (wtgt && !stgt) {
4874 /*
4875 * A WWPN target has moved and we still are tracking the old
4876 * SCSI ID. The only way we should be able to get here is if
4877 * we attempted to send an implicit logout for the old SCSI ID
4878 * and it failed for some reason, such as there being I/O
4879 * pending to the target. In this case, we will have already
4880 * deleted the rport from the FC transport so we do a move
4881 * login, which works even with I/O pending, however, if
4882 * there is still I/O pending, it will stay outstanding, so
4883 * we only do this if fast fail is disabled for the rport,
4884 * otherwise we let terminate_rport_io clean up the port
4885 * before we login at the new location.
4886 */
4887 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4888 if (wtgt->move_login) {
4889 /*
4890 * Do a move login here. The old target is no longer
4891 * known to the transport layer. We don't use the
4892 * normal ibmvfc_set_tgt_action to set this, as we
4893 * don't normally want to allow this state change.
4894 */
4895 wtgt->new_scsi_id = scsi_id;
4896 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4897 wtgt->init_retries = 0;
4898 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4899 }
4900 goto unlock_out;
4901 } else {
4902 tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4903 wtgt->action, wtgt->rport);
4904 }
4905 } else if (stgt) {
4906 if (tgt->need_login)
4907 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4908 goto unlock_out;
4909 }
4910 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4911
4912 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4913 memset(tgt, 0, sizeof(*tgt));
4914 tgt->scsi_id = scsi_id;
4915 tgt->wwpn = wwpn;
4916 tgt->vhost = vhost;
4917 tgt->need_login = 1;
4918 timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4919 kref_init(&tgt->kref);
4920 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4921 spin_lock_irqsave(vhost->host->host_lock, flags);
4922 tgt->cancel_key = vhost->task_set++;
4923 list_add_tail(&tgt->queue, &vhost->targets);
4924
4925unlock_out:
4926 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4927 return 0;
4928}
4929
4930/**
4931 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4932 * @vhost: ibmvfc host struct
4933 *
4934 * Returns:
4935 * 0 on success / other on failure
4936 **/
4937static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4938{
4939 int i, rc;
4940
4941 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4942 rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
4943
4944 return rc;
4945}
4946
4947/**
4948 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4949 * @evt: ibmvfc event struct
4950 *
4951 **/
4952static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4953{
4954 struct ibmvfc_host *vhost = evt->vhost;
4955 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4956 u32 mad_status = be16_to_cpu(rsp->common.status);
4957 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4958
4959 switch (mad_status) {
4960 case IBMVFC_MAD_SUCCESS:
4961 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4962 vhost->num_targets = be32_to_cpu(rsp->num_written);
4963 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4964 break;
4965 case IBMVFC_MAD_FAILED:
4966 level += ibmvfc_retry_host_init(vhost);
4967 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4968 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4969 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4970 break;
4971 case IBMVFC_MAD_DRIVER_FAILED:
4972 break;
4973 default:
4974 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4975 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4976 break;
4977 }
4978
4979 ibmvfc_free_event(evt);
4980 wake_up(&vhost->work_wait_q);
4981}
4982
4983/**
4984 * ibmvfc_discover_targets - Send Discover Targets MAD
4985 * @vhost: ibmvfc host struct
4986 *
4987 **/
4988static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4989{
4990 struct ibmvfc_discover_targets *mad;
4991 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
4992 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4993
4994 if (!evt) {
4995 ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
4996 ibmvfc_hard_reset_host(vhost);
4997 return;
4998 }
4999
5000 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
5001 mad = &evt->iu.discover_targets;
5002 memset(mad, 0, sizeof(*mad));
5003 mad->common.version = cpu_to_be32(1);
5004 mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
5005 mad->common.length = cpu_to_be16(sizeof(*mad));
5006 mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
5007 mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
5008 mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
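 /* Ask for the discovery list in port ID plus WWPN pair format */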
5009 mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
5010 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5011
5012 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5013 ibmvfc_dbg(vhost, "Sent discover targets\n");
5014 else
5015 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5016}
5017
5018static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
5019{
5020 struct ibmvfc_host *vhost = evt->vhost;
5021 struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
5022 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5023 u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
5024 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5025 int flags, active_queues, i;
5026
5027 ibmvfc_free_event(evt);
5028
5029 switch (mad_status) {
5030 case IBMVFC_MAD_SUCCESS:
5031 ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
5032 flags = be32_to_cpu(setup->flags);
5033 vhost->do_enquiry = 0;
5034 active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
5035 scrqs->active_queues = active_queues;
5036
5037 if (flags & IBMVFC_CHANNELS_CANCELED) {
5038 ibmvfc_dbg(vhost, "Channels Canceled\n");
5039 vhost->using_channels = 0;
5040 } else {
5041 if (active_queues)
5042 vhost->using_channels = 1;
5043 for (i = 0; i < active_queues; i++)
5044 scrqs->scrqs[i].vios_cookie =
5045 be64_to_cpu(setup->channel_handles[i]);
5046
5047 ibmvfc_dbg(vhost, "Using %u channels\n",
5048 vhost->scsi_scrqs.active_queues);
5049 }
5050 break;
5051 case IBMVFC_MAD_FAILED:
5052 level += ibmvfc_retry_host_init(vhost);
5053 ibmvfc_log(vhost, level, "Channel Setup failed\n");
5054 fallthrough;
5055 case IBMVFC_MAD_DRIVER_FAILED:
5056 return;
5057 default:
5058 dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
5059 mad_status);
5060 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5061 return;
5062 }
5063
5064 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5065 wake_up(&vhost->work_wait_q);
5066}
5067
5068static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
5069{
5070 struct ibmvfc_channel_setup_mad *mad;
5071 struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
5072 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5073 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5074 unsigned int num_channels =
5075 min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
5076 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5077 int i;
5078
5079 if (!evt) {
5080 ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
5081 ibmvfc_hard_reset_host(vhost);
5082 return;
5083 }
5084
5085 memset(setup_buf, 0, sizeof(*setup_buf));
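 /* A request for zero channels tells the VIOS to cancel channelized operation */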
5086 if (num_channels == 0)
5087 setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
5088 else {
5089 setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
5090 for (i = 0; i < num_channels; i++)
5091 setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
5092 }
5093
5094 ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
5095 mad = &evt->iu.channel_setup;
5096 memset(mad, 0, sizeof(*mad));
5097 mad->common.version = cpu_to_be32(1);
5098 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
5099 mad->common.length = cpu_to_be16(sizeof(*mad));
5100 mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
5101 mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
5102
5103 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5104
5105 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5106 ibmvfc_dbg(vhost, "Sent channel setup\n");
5107 else
5108 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
5109}
5110
5111static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
5112{
5113 struct ibmvfc_host *vhost = evt->vhost;
5114 struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
5115 u32 mad_status = be16_to_cpu(rsp->common.status);
5116 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5117
5118 switch (mad_status) {
5119 case IBMVFC_MAD_SUCCESS:
5120 ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
5121 vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
5122 ibmvfc_free_event(evt);
5123 break;
5124 case IBMVFC_MAD_FAILED:
5125 level += ibmvfc_retry_host_init(vhost);
5126 ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
5127 fallthrough;
5128 case IBMVFC_MAD_DRIVER_FAILED:
5129 ibmvfc_free_event(evt);
5130 return;
5131 default:
5132 dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
5133 mad_status);
5134 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5135 ibmvfc_free_event(evt);
5136 return;
5137 }
5138
5139 ibmvfc_channel_setup(vhost);
5140}
5141
5142static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
5143{
5144 struct ibmvfc_channel_enquiry *mad;
5145 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5146 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5147
5148 if (!evt) {
5149 ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
5150 ibmvfc_hard_reset_host(vhost);
5151 return;
5152 }
5153
5154 ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5155 mad = &evt->iu.channel_enquiry;
5156 memset(mad, 0, sizeof(*mad));
5157 mad->common.version = cpu_to_be32(1);
5158 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
5159 mad->common.length = cpu_to_be16(sizeof(*mad));
5160
5161 if (mig_channels_only)
5162 mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
5163 if (mig_no_less_channels)
5164 mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
5165
5166 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5167
5168 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5169 ibmvfc_dbg(vhost, "Sent channel enquiry\n");
5170 else
5171 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5172}
5173
5174/**
5175 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5176 * @evt: ibmvfc event struct
5177 *
5178 **/
5179static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5180{
5181 struct ibmvfc_host *vhost = evt->vhost;
5182 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5183 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5184 unsigned int npiv_max_sectors;
5185 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5186
5187 switch (mad_status) {
5188 case IBMVFC_MAD_SUCCESS:
5189 ibmvfc_free_event(evt);
5190 break;
5191 case IBMVFC_MAD_FAILED:
5192 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5193 level += ibmvfc_retry_host_init(vhost);
5194 else
5195 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5196 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5197 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5198 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5199 ibmvfc_free_event(evt);
5200 return;
5201 case IBMVFC_MAD_CRQ_ERROR:
5202 ibmvfc_retry_host_init(vhost);
5203 fallthrough;
5204 case IBMVFC_MAD_DRIVER_FAILED:
5205 ibmvfc_free_event(evt);
5206 return;
5207 default:
5208 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5209 ibmvfc_link_down(vhost, state: IBMVFC_LINK_DEAD);
5210 ibmvfc_free_event(evt);
5211 return;
5212 }
5213
5214 vhost->client_migrated = 0;
5215
5216 if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5217 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5218 rsp->flags);
5219 ibmvfc_link_down(vhost, state: IBMVFC_LINK_DEAD);
5220 wake_up(&vhost->work_wait_q);
5221 return;
5222 }
5223
5224 if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5225 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5226 rsp->max_cmds);
5227 ibmvfc_link_down(vhost, state: IBMVFC_LINK_DEAD);
5228 wake_up(&vhost->work_wait_q);
5229 return;
5230 }
5231
5232 vhost->logged_in = 1;
5233 npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5234 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5235 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5236 rsp->drc_name, npiv_max_sectors);
5237
5238 fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5239 fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5240 fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5241 fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5242 fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5243 fc_host_supported_classes(vhost->host) = 0;
5244 if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5245 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5246 if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5247 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5248 if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5249 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5250 fc_host_maxframe_size(vhost->host) =
5251 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5252
5253 vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5254 vhost->host->max_sectors = npiv_max_sectors;
5255
5256 if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5257 ibmvfc_channel_enquiry(vhost);
5258 } else {
5259 vhost->do_enquiry = 0;
5260 ibmvfc_set_host_action(vhost, action: IBMVFC_HOST_ACTION_QUERY);
5261 wake_up(&vhost->work_wait_q);
5262 }
5263}
5264
5265/**
5266 * ibmvfc_npiv_login - Sends NPIV login
5267 * @vhost: ibmvfc host struct
5268 *
5269 **/
5270static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5271{
5272 struct ibmvfc_npiv_login_mad *mad;
5273 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5274
5275 if (!evt) {
5276 ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
5277 ibmvfc_hard_reset_host(vhost);
5278 return;
5279 }
5280
5281 ibmvfc_gather_partition_info(vhost);
5282 ibmvfc_set_login_info(vhost);
5283 ibmvfc_init_event(evt, done: ibmvfc_npiv_login_done, format: IBMVFC_MAD_FORMAT);
5284
5285 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5286 mad = &evt->iu.npiv_login;
5287 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5288 mad->common.version = cpu_to_be32(1);
5289 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5290 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5291 mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5292 mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5293
5294 ibmvfc_set_host_action(vhost, action: IBMVFC_HOST_ACTION_INIT_WAIT);
5295
5296 if (!ibmvfc_send_event(evt, vhost, timeout: default_timeout))
5297 ibmvfc_dbg(vhost, "Sent NPIV login\n");
5298 else
5299 ibmvfc_link_down(vhost, state: IBMVFC_LINK_DEAD);
5300}
5301
/**
 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
 * @evt: ibmvfc event struct
 *
 **/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);

	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		if (list_empty(&vhost->crq.sent) &&
		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
			ibmvfc_init_host(vhost);
			return;
		}
		break;
	case IBMVFC_MAD_FAILED:
	case IBMVFC_MAD_NOT_SUPPORTED:
	case IBMVFC_MAD_CRQ_ERROR:
	case IBMVFC_MAD_DRIVER_FAILED:
	default:
		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
		break;
	}

	ibmvfc_hard_reset_host(vhost);
}

/**
 * ibmvfc_npiv_logout - Issue an NPIV Logout
 * @vhost: ibmvfc host struct
 *
 **/
static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_logout_mad *mad;
	struct ibmvfc_event *evt;

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
		ibmvfc_hard_reset_host(vhost);
		return;
	}

	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);

	mad = &evt->iu.npiv_logout;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);

	if (!ibmvfc_send_event(evt, vhost, default_timeout))
		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
	else
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}

/**
 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
 * @vhost: ibmvfc host struct
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
			return 1;
	}

	return 0;
}

/**
 * ibmvfc_dev_logo_to_do - Is there target logout work to do?
 * @vhost: ibmvfc host struct
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
			return 1;
	}
	return 0;
}

/**
 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
 * @vhost: ibmvfc host struct
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (kthread_should_stop())
		return 1;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		return 0;
	case IBMVFC_HOST_ACTION_TGT_INIT:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		if (vhost->discovery_threads == disc_threads)
			return 0;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
				return 1;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		if (vhost->discovery_threads == disc_threads)
			return 0;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
				return 1;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
	case IBMVFC_HOST_ACTION_QUERY:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		break;
	}

	return 1;
}

/**
 * ibmvfc_work_to_do - Is there task level work to do?
 * @vhost: ibmvfc host struct
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	rc = __ibmvfc_work_to_do(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rc;
}

/**
 * ibmvfc_log_ae - Log async events if necessary
 * @vhost: ibmvfc host struct
 * @events: events to log
 *
 **/
static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
{
	if (events & IBMVFC_AE_RSCN)
		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
	if ((events & IBMVFC_AE_LINKDOWN) &&
	    vhost->state >= IBMVFC_HALTED)
		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
	if ((events & IBMVFC_AE_LINKUP) &&
	    vhost->state == IBMVFC_INITIALIZING)
		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
}

/**
 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
 * @tgt: ibmvfc target struct
 *
 **/
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct fc_rport *rport;
	unsigned long flags;

	tgt_dbg(tgt, "Adding rport\n");
	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
		tgt_dbg(tgt, "Deleting rport\n");
		list_del(&tgt->queue);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		del_timer_sync(&tgt->timer);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
		tgt->rport = NULL;
		tgt->init_retries = 0;
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	if (rport) {
		tgt_dbg(tgt, "rport add succeeded\n");
		tgt->rport = rport;
		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
		rport->supported_classes = 0;
		tgt->target_id = rport->scsi_target_id;
		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS1;
		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS2;
		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS3;
		if (rport->rqst_q)
			blk_queue_max_segments(rport->rqst_q, 1);
	} else
		tgt_dbg(tgt, "rport add failed\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_do_work - Do task level work
 * @vhost: ibmvfc host struct
 *
 **/
static void ibmvfc_do_work(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;
	unsigned long flags;
	struct fc_rport *rport;
	LIST_HEAD(purge);
	int rc;

	ibmvfc_log_ae(vhost, vhost->events_to_log);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->events_to_log = 0;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		break;
	case IBMVFC_HOST_ACTION_RESET:
		list_splice_init(&vhost->purge, &purge);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		ibmvfc_complete_purge(&purge);
		rc = ibmvfc_reset_crq(vhost);

		spin_lock_irqsave(vhost->host->host_lock, flags);
		if (!rc || rc == H_CLOSED)
			vio_enable_interrupts(to_vio_dev(vhost->dev));
		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
			/*
			 * The only action we could have changed to would have
			 * been reenable, in which case, we skip the rest of
			 * this path and wait until we've done the re-enable
			 * before sending the crq init.
			 */
			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;

			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
			}
		}
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
		list_splice_init(&vhost->purge, &purge);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		ibmvfc_complete_purge(&purge);
		rc = ibmvfc_reenable_crq_queue(vhost);

		spin_lock_irqsave(vhost->host->host_lock, flags);
		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
			/*
			 * The only action we could have changed to would have
			 * been reset, in which case, we skip the rest of this
			 * path and wait until we've done the reset before
			 * sending the crq init.
			 */
			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
			}
		}
		break;
	case IBMVFC_HOST_ACTION_LOGO:
		vhost->job_step(vhost);
		break;
	case IBMVFC_HOST_ACTION_INIT:
		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
		if (vhost->delay_init) {
			vhost->delay_init = 0;
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			ssleep(15);
			return;
		} else
			vhost->job_step(vhost);
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
		break;
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost))
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
		break;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (ibmvfc_dev_logo_to_do(vhost)) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return;
		}

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
				tgt_dbg(tgt, "Deleting rport\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				list_del(&tgt->queue);
				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				del_timer_sync(&tgt->timer);
				kref_put(&tgt->kref, ibmvfc_release_tgt);
				return;
			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				tgt->init_retries = 0;
				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);

				/*
				 * If fast fail is enabled, we wait for it to fire and then clean up
				 * the old port, since we expect the fast fail timer to clean up the
				 * outstanding I/O faster than waiting for normal command timeouts.
				 * However, if fast fail is disabled, any I/O outstanding to the
				 * rport LUNs will stay outstanding indefinitely, since the EH handlers
				 * won't get invoked for I/Os timing out. If this is an NPIV failover
				 * scenario, the better alternative is to use the move login.
				 */
				if (rport && rport->fast_io_fail_tmo == -1)
					tgt->move_login = 1;
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				return;
			}
		}

		if (vhost->state == IBMVFC_INITIALIZING) {
			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
				if (vhost->reinit) {
					vhost->reinit = 0;
					scsi_block_requests(vhost->host);
					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				} else {
					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
					wake_up(&vhost->init_wait_q);
					schedule_work(&vhost->rport_add_work_q);
					vhost->init_retries = 0;
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					scsi_unblock_requests(vhost->host);
				}

				return;
			} else {
				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
				vhost->job_step = ibmvfc_discover_targets;
			}
		} else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			scsi_unblock_requests(vhost->host);
			wake_up(&vhost->init_wait_q);
			return;
		}
		break;
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		ibmvfc_alloc_targets(vhost);
		spin_lock_irqsave(vhost->host->host_lock, flags);
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost))
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_work - Do task level work
 * @data: ibmvfc host struct
 *
 * Returns:
 *	zero
 **/
static int ibmvfc_work(void *data)
{
	struct ibmvfc_host *vhost = data;
	int rc;

	set_user_nice(current, MIN_NICE);

	while (1) {
		rc = wait_event_interruptible(vhost->work_wait_q,
					      ibmvfc_work_to_do(vhost));

		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		ibmvfc_do_work(vhost);
	}

	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
	return 0;
}

/**
 * ibmvfc_alloc_queue - Allocate queue
 * @vhost: ibmvfc host struct
 * @queue: ibmvfc queue to allocate
 * @fmt: queue format to allocate
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue,
			      enum ibmvfc_msg_fmt fmt)
{
	struct device *dev = vhost->dev;
	size_t fmt_size;

	ENTER;
	spin_lock_init(&queue->_lock);
	queue->q_lock = &queue->_lock;

	switch (fmt) {
	case IBMVFC_CRQ_FMT:
		fmt_size = sizeof(*queue->msgs.crq);
		queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
		queue->evt_depth = scsi_qdepth;
		queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
		break;
	case IBMVFC_ASYNC_FMT:
		fmt_size = sizeof(*queue->msgs.async);
		break;
	case IBMVFC_SUB_CRQ_FMT:
		fmt_size = sizeof(*queue->msgs.scrq);
		/* We need one extra event for Cancel Commands */
		queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
		queue->evt_depth = scsi_qdepth;
		queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
		break;
	default:
		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
		return -EINVAL;
	}

	queue->fmt = fmt;
	if (ibmvfc_init_event_pool(vhost, queue)) {
		dev_err(dev, "Couldn't initialize event pool.\n");
		return -ENOMEM;
	}

	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs.handle)
		return -ENOMEM;

	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, queue->msg_token)) {
		free_page((unsigned long)queue->msgs.handle);
		queue->msgs.handle = NULL;
		return -ENOMEM;
	}

	queue->cur = 0;
	queue->size = PAGE_SIZE / fmt_size;

	queue->vhost = vhost;
	return 0;
}

/**
 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
 * @vhost: ibmvfc host struct
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 *
 * Return value:
 *	zero on success / other on failure
 **/
static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
{
	int rc, retrc = -ENOMEM;
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ENTER;
	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
		return -ENOMEM;

	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
					crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		retrc = rc = ibmvfc_reset_crq(vhost);

	if (rc == H_CLOSED)
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);

	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
		goto req_irq_failed;
	}

	if ((rc = vio_enable_interrupts(vdev))) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	LEAVE;
	return retrc;

req_irq_failed:
	tasklet_kill(&vhost->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	ibmvfc_free_queue(vhost, crq);
	return retrc;
}

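/**
 * ibmvfc_register_channel - Register a sub-CRQ channel and request its irq
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the sub-CRQ channel to register
 *
 * Returns:
 *	0 on success / other on failure
 **/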
static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
				   struct ibmvfc_channels *channels,
				   int index)
{
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *scrq = &channels->scrqs[index];
	int rc = -ENOMEM;

	ENTER;

	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
			   &scrq->cookie, &scrq->hw_irq);

	/* H_CLOSED indicates successful register, but no CRQ partner */
	if (rc && rc != H_CLOSED) {
		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
		if (rc == H_PARAMETER)
			dev_warn_once(dev, "Firmware may not support MQ\n");
		goto reg_failed;
	}

	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

	if (!scrq->irq) {
		rc = -EINVAL;
		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
		goto irq_failed;
	}

	switch (channels->protocol) {
	case IBMVFC_PROTO_SCSI:
		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
			 vdev->unit_address, index);
		scrq->handler = ibmvfc_interrupt_mq;
		break;
	case IBMVFC_PROTO_NVME:
		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
			 vdev->unit_address, index);
		scrq->handler = ibmvfc_interrupt_mq;
		break;
	default:
		dev_err(dev, "Unknown channel protocol (%d)\n",
			channels->protocol);
		goto irq_failed;
	}

	rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);

	if (rc) {
		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
		irq_dispose_mapping(scrq->irq);
		goto irq_failed;
	}

	scrq->hwq_id = index;

	LEAVE;
	return 0;

irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	LEAVE;
	return rc;
}

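/**
 * ibmvfc_deregister_channel - Free a sub-CRQ channel's irq and deregister it
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the sub-CRQ channel to deregister
 *
 **/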
static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
				      struct ibmvfc_channels *channels,
				      int index)
{
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *scrq = &channels->scrqs[index];
	long rc;

	ENTER;

	free_irq(scrq->irq, scrq);
	irq_dispose_mapping(scrq->irq);
	scrq->irq = 0;

	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
					scrq->cookie);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);

	/* Clean out the queue */
	memset(scrq->msgs.crq, 0, PAGE_SIZE);
	scrq->cur = 0;

	LEAVE;
}

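/**
 * ibmvfc_reg_sub_crqs - Register all sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * If any channel fails to register, all previously registered channels
 * are deregistered and channel enquiry is disabled.
 **/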
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
				struct ibmvfc_channels *channels)
{
	int i, j;

	ENTER;
	if (!vhost->mq_enabled || !channels->scrqs)
		return;

	for (i = 0; i < channels->max_queues; i++) {
		if (ibmvfc_register_channel(vhost, channels, i)) {
			for (j = i; j > 0; j--)
				ibmvfc_deregister_channel(vhost, channels, j - 1);
			vhost->do_enquiry = 0;
			return;
		}
	}

	LEAVE;
}

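/**
 * ibmvfc_dereg_sub_crqs - Deregister all sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/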
static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
				  struct ibmvfc_channels *channels)
{
	int i;

	ENTER;
	if (!vhost->mq_enabled || !channels->scrqs)
		return;

	for (i = 0; i < channels->max_queues; i++)
		ibmvfc_deregister_channel(vhost, channels, i);

	LEAVE;
}

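/**
 * ibmvfc_alloc_channels - Allocate queue memory for all sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/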
static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
				 struct ibmvfc_channels *channels)
{
	struct ibmvfc_queue *scrq;
	int i, j;
	int rc = 0;

	channels->scrqs = kcalloc(channels->max_queues,
				  sizeof(*channels->scrqs),
				  GFP_KERNEL);
	if (!channels->scrqs)
		return -ENOMEM;

	for (i = 0; i < channels->max_queues; i++) {
		scrq = &channels->scrqs[i];
		rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
		if (rc) {
			for (j = i; j > 0; j--) {
				scrq = &channels->scrqs[j - 1];
				ibmvfc_free_queue(vhost, scrq);
			}
			kfree(channels->scrqs);
			channels->scrqs = NULL;
			channels->active_queues = 0;
			return rc;
		}
	}

	return rc;
}

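/**
 * ibmvfc_init_sub_crqs - Allocate and register the SCSI sub-CRQ channels
 * @vhost: ibmvfc host struct
 *
 * On allocation failure, multiqueue support is disabled and the host
 * falls back to the single CRQ.
 **/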
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
{
	ENTER;
	if (!vhost->mq_enabled)
		return;

	if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
		vhost->do_enquiry = 0;
		vhost->mq_enabled = 0;
		return;
	}

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	LEAVE;
}

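/**
 * ibmvfc_release_channels - Free queue memory for all sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/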
static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
				    struct ibmvfc_channels *channels)
{
	struct ibmvfc_queue *scrq;
	int i;

	if (channels->scrqs) {
		for (i = 0; i < channels->max_queues; i++) {
			scrq = &channels->scrqs[i];
			ibmvfc_free_queue(vhost, scrq);
		}

		kfree(channels->scrqs);
		channels->scrqs = NULL;
		channels->active_queues = 0;
	}
}

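/**
 * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQ channels
 * @vhost: ibmvfc host struct
 *
 **/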
static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
{
	ENTER;
	if (!vhost->scsi_scrqs.scrqs)
		return;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
	LEAVE;
}

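/**
 * ibmvfc_free_disc_buf - Free the Discover Targets buffer
 * @dev: device struct
 * @channels: ibmvfc channels struct
 *
 **/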
static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
{
	dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
			  channels->disc_buf_dma);
}

/**
 * ibmvfc_free_mem - Free memory for vhost
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_q = &vhost->async_crq;

	ENTER;
	mempool_destroy(vhost->tgt_pool);
	kfree(vhost->trace);
	ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
			  vhost->channel_setup_buf, vhost->channel_setup_dma);
	dma_pool_destroy(vhost->sg_pool);
	ibmvfc_free_queue(vhost, async_q);
	LEAVE;
}

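/**
 * ibmvfc_alloc_disc_buf - Allocate the Discover Targets buffer
 * @dev: device struct
 * @channels: ibmvfc channels struct
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/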
static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
{
	channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
	channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
						&channels->disc_buf_dma, GFP_KERNEL);

	if (!channels->disc_buf) {
		dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
			(channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
		return -ENOMEM;
	}

	return 0;
}

/**
 * ibmvfc_alloc_mem - Allocate memory for vhost
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_q = &vhost->async_crq;
	struct device *dev = vhost->dev;

	ENTER;
	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
		dev_err(dev, "Couldn't allocate/map async queue.\n");
		goto nomem;
	}

	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
					 SG_ALL * sizeof(struct srp_direct_buf),
					 sizeof(struct srp_direct_buf), 0);

	if (!vhost->sg_pool) {
		dev_err(dev, "Failed to allocate sg pool\n");
		goto unmap_async_crq;
	}

	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
					      &vhost->login_buf_dma, GFP_KERNEL);

	if (!vhost->login_buf) {
		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
		goto free_sg_pool;
	}

	if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
		goto free_login_buffer;

	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
	atomic_set(&vhost->trace_index, -1);

	if (!vhost->trace)
		goto free_scsi_disc_buffer;

	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
						      sizeof(struct ibmvfc_target));

	if (!vhost->tgt_pool) {
		dev_err(dev, "Couldn't allocate target memory pool\n");
		goto free_trace;
	}

	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
						      &vhost->channel_setup_dma,
						      GFP_KERNEL);

	if (!vhost->channel_setup_buf) {
		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
		goto free_tgt_pool;
	}

	LEAVE;
	return 0;

free_tgt_pool:
	mempool_destroy(vhost->tgt_pool);
free_trace:
	kfree(vhost->trace);
free_scsi_disc_buffer:
	ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
free_login_buffer:
	dma_free_coherent(dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
	dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
	ibmvfc_free_queue(vhost, async_q);
nomem:
	LEAVE;
	return -ENOMEM;
}

/**
 * ibmvfc_rport_add_thread - Worker thread for rport adds
 * @work: work struct
 *
 **/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
						 rport_add_work_q);
	struct ibmvfc_target *tgt;
	struct fc_rport *rport;
	unsigned long flags;
	int did_work;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	do {
		did_work = 0;
		if (vhost->state != IBMVFC_ACTIVE)
			break;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->add_rport) {
				did_work = 1;
				tgt->add_rport = 0;
				kref_get(&tgt->kref);
				rport = tgt->rport;
				if (!rport) {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					ibmvfc_tgt_add_rport(tgt);
				} else if (get_device(&rport->dev)) {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					tgt_dbg(tgt, "Setting rport roles\n");
					fc_remote_port_rolechg(rport, tgt->ids.roles);
					put_device(&rport->dev);
				} else {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				}

				kref_put(&tgt->kref, ibmvfc_release_tgt);
				spin_lock_irqsave(vhost->host->host_lock, flags);
				break;
			}
		}
	} while (did_work);

	if (vhost->state == IBMVFC_ACTIVE)
		vhost->scan_complete = 1;
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
}

/**
 * ibmvfc_probe - Adapter hot plug add entry point
 * @vdev: vio device struct
 * @id: vio device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;
	unsigned int online_cpus = num_online_cpus();
	unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);

	ENTER;
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = scsi_qdepth;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;
	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;

	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->targets);
	INIT_LIST_HEAD(&vhost->purge);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;
	vhost->log_level = log_level;
	vhost->task_set = 1;

	vhost->mq_enabled = mq_enabled;
	vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
	vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
	vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
	vhost->using_channels = 0;
	vhost->do_enquiry = 1;
	vhost->scan_timeout = 0;

	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);
	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
	mutex_init(&vhost->passthru_mutex);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		rc = PTR_ERR(vhost->work_thread);
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_crq;

	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	ibmvfc_init_sub_crqs(vhost);

	if (shost_to_fc_host(shost)->rqst_q)
		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
	dev_set_drvdata(dev, vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

remove_shost:
	scsi_remove_host(shost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}

/**
 * ibmvfc_remove - Adapter hot plug remove entry point
 * @vdev: vio device struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	LIST_HEAD(purge);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_wait_while_resetting(vhost);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	list_splice_init(&vhost->purge, &purge);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_complete_purge(&purge);
	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_release_crq_queue(vhost);

	ibmvfc_free_mem(vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
}

/**
 * ibmvfc_resume - Resume from suspend
 * @dev: device struct
 *
 * We may have lost an interrupt across suspend/resume, so kick the
 * interrupt handler
 *
 */
static int ibmvfc_resume(struct device *dev)
{
	unsigned long flags;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct vio_dev *vdev = to_vio_dev(dev);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vio_disable_interrupts(vdev);
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return 0;
}

/**
 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
 * @vdev: vio device struct
 *
 * Return value:
 *	Number of bytes the driver will need to DMA map at the same time in
 *	order to perform well.
 */
static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
{
	unsigned long pool_dma;

	pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}

static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);

static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};

static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};

static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};

/**
 * ibmvfc_module_init - Initialize the ibmvfc module
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int __init ibmvfc_module_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_VIO))
		return -ENODEV;

	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);

	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
	if (!ibmvfc_transport_template)
		return -ENOMEM;

	rc = vio_register_driver(&ibmvfc_driver);
	if (rc)
		fc_release_transport(ibmvfc_transport_template);
	return rc;
}

/**
 * ibmvfc_module_exit - Teardown the ibmvfc module
 *
 * Return value:
 *	nothing
 **/
static void __exit ibmvfc_module_exit(void)
{
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}

module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);
