1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * QLogic Fibre Channel HBA Driver |
4 | * Copyright (c) 2003-2014 QLogic Corporation |
5 | */ |
6 | #include "qla_def.h" |
7 | |
8 | #include <linux/bitfield.h> |
9 | #include <linux/moduleparam.h> |
10 | #include <linux/vmalloc.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/kthread.h> |
13 | #include <linux/mutex.h> |
14 | #include <linux/kobject.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/blk-mq-pci.h> |
17 | #include <linux/refcount.h> |
18 | #include <linux/crash_dump.h> |
19 | #include <linux/trace_events.h> |
20 | #include <linux/trace.h> |
21 | |
22 | #include <scsi/scsi_tcq.h> |
23 | #include <scsi/scsicam.h> |
24 | #include <scsi/scsi_transport.h> |
25 | #include <scsi/scsi_transport_fc.h> |
26 | |
27 | #include "qla_target.h" |
28 | |
29 | /* |
30 | * Driver version |
31 | */ |
32 | char qla2x00_version_str[40]; |
33 | |
34 | static int apidev_major; |
35 | |
36 | /* |
37 | * SRB allocation cache |
38 | */ |
39 | struct kmem_cache *srb_cachep; |
40 | |
41 | static struct trace_array *qla_trc_array; |
42 | |
43 | int ql2xfulldump_on_mpifail; |
44 | module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR); |
45 | MODULE_PARM_DESC(ql2xfulldump_on_mpifail, |
46 | "Set this to take full dump on MPI hang." ); |
47 | |
48 | int ql2xenforce_iocb_limit = 2; |
49 | module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR); |
50 | MODULE_PARM_DESC(ql2xenforce_iocb_limit, |
51 | "Enforce IOCB throttling, to avoid FW congestion. (default: 2) " |
52 | "1: track usage per queue, 2: track usage per adapter" ); |
53 | |
54 | /* |
55 | * CT6 CTX allocation cache |
56 | */ |
57 | static struct kmem_cache *ctx_cachep; |
58 | /* |
59 | * error level for logging |
60 | */ |
61 | uint ql_errlev = 0x8001; |
62 | |
63 | int ql2xsecenable; |
64 | module_param(ql2xsecenable, int, S_IRUGO); |
65 | MODULE_PARM_DESC(ql2xsecenable, |
66 | "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled." ); |
67 | |
68 | static int ql2xenableclass2; |
69 | module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); |
70 | MODULE_PARM_DESC(ql2xenableclass2, |
71 | "Specify if Class 2 operations are supported from the very " |
72 | "beginning. Default is 0 - class 2 not supported." ); |
73 | |
74 | |
75 | int ql2xlogintimeout = 20; |
76 | module_param(ql2xlogintimeout, int, S_IRUGO); |
77 | MODULE_PARM_DESC(ql2xlogintimeout, |
78 | "Login timeout value in seconds." ); |
79 | |
80 | int qlport_down_retry; |
81 | module_param(qlport_down_retry, int, S_IRUGO); |
82 | MODULE_PARM_DESC(qlport_down_retry, |
83 | "Maximum number of command retries to a port that returns " |
84 | "a PORT-DOWN status." ); |
85 | |
86 | int ql2xplogiabsentdevice; |
87 | module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); |
88 | MODULE_PARM_DESC(ql2xplogiabsentdevice, |
89 | "Option to enable PLOGI to devices that are not present after " |
90 | "a Fabric scan. This is needed for several broken switches. " |
91 | "Default is 0 - no PLOGI. 1 - perform PLOGI." ); |
92 | |
93 | int ql2xloginretrycount; |
94 | module_param(ql2xloginretrycount, int, S_IRUGO); |
95 | MODULE_PARM_DESC(ql2xloginretrycount, |
96 | "Specify an alternate value for the NVRAM login retry count." ); |
97 | |
98 | int ql2xallocfwdump = 1; |
99 | module_param(ql2xallocfwdump, int, S_IRUGO); |
100 | MODULE_PARM_DESC(ql2xallocfwdump, |
101 | "Option to enable allocation of memory for a firmware dump " |
102 | "during HBA initialization. Memory allocation requirements " |
103 | "vary by ISP type. Default is 1 - allocate memory." ); |
104 | |
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
/* "logging" is a short module-parameter alias for the same variable. */
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level" );

/* Same bitmask as above, but gates logging into the kernel trace buffer. */
int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
		"Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n" );

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements." );
136 | |
int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
/* "fdmi" is a short module-parameter alias for the same variable. */
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI registrations. "
		"1 - provide FDMI registrations (default)." );

/* Default per-LUN queue depth (see ql2xmaxqdepth below). */
#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64." );

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		" 0 -- No DIF Support\n"
		" 1 -- Enable DIF for all types\n"
		" 2 -- Enable DIF for all types, except Type 0.\n" );

/* NVMe-FC support defaults on only when the NVMe FC transport is built. */
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
    "Enables NVME support. "
    "0 - no NVMe.  Default is Y" );

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		" 0 -- Error isolation disabled\n"
		" 1 -- Error isolation enabled only for DIX Type 0\n"
		" 2 -- Error isolation enabled for all Types\n" );
179 | |
int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA." );

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support." );

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
/* "fwload" is a short module-parameter alias for the same variable. */
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:.\n"
		" 2 -- load firmware via the request_firmware() (hotplug).\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n" );

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst."
		"Default is 0 - skip ETS enablement." );

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n" );

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
		"Enables GFF_ID checks of port type. "
		"Default is 0 - Do not use GFF_ID information." );

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
		"Default is 1 - Issue TM IOCBs via mailbox mechanism." );

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour.\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n" );
234 | |
uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
		"Defines the maximum LU number to register with the SCSI "
		"midlayer. Default is 65535." );

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
		"Set the Minidump driver capture mask level. "
		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F." );

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
		"Enable/disable MiniDump. "
		"0 - MiniDump disabled. "
		"1 (Default) - MiniDump enabled." );

/*
 * NOTE(review): declared int but registered with the "uint" module_param
 * type (same for ql2xexchoffld/ql2xiniexchg below) — relies on int and
 * unsigned int having identical representation; confirm intent.
 */
int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
		 "Number of extended Logins. "
		 "0 (Default)- Disabled." );

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges." );

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges." );

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
		"Allow FW to hold status IOCB until ABTS rsp received. "
		"0 (Default) Do not set fw option. "
		"1 - Set fw option to hold ABTS." );

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
		"0 (Default). Do not move IOCBs"
		"1 - Move IOCBs." );

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
		 "Detect SFP range and set appropriate distance.\n"
		 "1 (Default): Enable\n" );

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
		 " Default is 1, enable MSI-X interrupt mechanism.\n"
		 " 0 -- enable traditional pin-based mechanism.\n"
		 " 1 -- enable MSI-X interrupt mechanism.\n"
		 " 2 -- enable MSI interrupt mechanism.\n" );

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
		 "Reserve 1/2 of emergency exchanges for ELS.\n"
		 " 0 (default): disabled" );
304 | |
static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
		 "Override DIF/DIX protection capabilities mask\n"
		 "Default is 0 which sets protection mask based on "
		 "capabilities reported by HBA firmware.\n" );

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
		 " 0 -- Let HBA firmware decide\n"
		 " 1 -- Force T10 CRC\n"
		 " 2 -- Force IP checksum\n" );

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
    "Force using internal buffers for DIF information\n"
    "0 (Default). Based on check.\n"
    "1 Force using internal buffers\n" );

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
/* "smartsan" is a short module-parameter alias for the same variable. */
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
		"Send SmartSAN Management Attributes for FDMI Registration."
		" Default is 0 - No SmartSAN registration,"
		" 1 - Register SmartSAN Management Attributes." );

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
/* "rdpenable" is a short module-parameter alias for the same variable. */
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
		"Enables RDP responses. "
		"0 - no RDP responses (default). "
		"1 - provide RDP responses." );
int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
		 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)" );
345 | |
346 | |
static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
	"Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n" );

/* Forward declarations for helpers defined later in this file. */
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);

u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
module_param(ql2xnvme_queues, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xnvme_queues,
	"Number of NVMe Queues that can be configured.\n"
	"Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
	"1 - Minimum number of queues supported\n"
	"8 - Default value" );
364 | |
365 | int ql2xfc2target = 1; |
366 | module_param(ql2xfc2target, int, 0444); |
367 | MODULE_PARM_DESC(qla2xfc2target, |
368 | "Enables FC2 Target support. " |
369 | "0 - FC2 Target support is disabled. " |
370 | "1 - FC2 Target support is enabled (default)." ); |
371 | |
/* FC transport templates for the physical host and for NPIV vports. */
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */
379 | |
380 | __inline__ void |
381 | qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) |
382 | { |
383 | timer_setup(&vha->timer, qla2x00_timer, 0); |
384 | vha->timer.expires = jiffies + interval * HZ; |
385 | add_timer(timer: &vha->timer); |
386 | vha->timer_active = 1; |
387 | } |
388 | |
389 | static inline void |
390 | qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) |
391 | { |
392 | /* Currently used for 82XX only. */ |
393 | if (vha->device_flags & DFLG_DEV_FAILED) { |
394 | ql_dbg(ql_dbg_timer, vha, 0x600d, |
395 | fmt: "Device in a failed state, returning.\n" ); |
396 | return; |
397 | } |
398 | |
399 | mod_timer(timer: &vha->timer, expires: jiffies + interval * HZ); |
400 | } |
401 | |
402 | static __inline__ void |
403 | qla2x00_stop_timer(scsi_qla_host_t *vha) |
404 | { |
405 | del_timer_sync(timer: &vha->timer); |
406 | vha->timer_active = 0; |
407 | } |
408 | |
/* DPC (deferred procedure call) kernel-thread entry point. */
static int qla2x00_do_dpc(void *data);

/* Presumably handles reset async-event notification — TODO confirm at definition. */
static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
/* Multiqueue queuecommand path; called from qla2xxx_queuecommand(). */
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair);
419 | |
420 | /* -------------------------------------------------------------------------- */ |
421 | static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, |
422 | struct rsp_que *rsp) |
423 | { |
424 | struct qla_hw_data *ha = vha->hw; |
425 | |
426 | rsp->qpair = ha->base_qpair; |
427 | rsp->req = req; |
428 | ha->base_qpair->hw = ha; |
429 | ha->base_qpair->req = req; |
430 | ha->base_qpair->rsp = rsp; |
431 | ha->base_qpair->vha = vha; |
432 | ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; |
433 | ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; |
434 | ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; |
435 | ha->base_qpair->srb_mempool = ha->srb_mempool; |
436 | INIT_LIST_HEAD(list: &ha->base_qpair->hints_list); |
437 | INIT_LIST_HEAD(list: &ha->base_qpair->dsd_list); |
438 | ha->base_qpair->enable_class_2 = ql2xenableclass2; |
439 | /* init qpair to this cpu. Will adjust at run time. */ |
440 | qla_cpu_update(qpair: rsp->qpair, raw_smp_processor_id()); |
441 | ha->base_qpair->pdev = ha->pdev; |
442 | |
443 | if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) |
444 | ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; |
445 | } |
446 | |
447 | static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, |
448 | struct rsp_que *rsp) |
449 | { |
450 | scsi_qla_host_t *vha = pci_get_drvdata(pdev: ha->pdev); |
451 | |
452 | ha->req_q_map = kcalloc(n: ha->max_req_queues, size: sizeof(struct req_que *), |
453 | GFP_KERNEL); |
454 | if (!ha->req_q_map) { |
455 | ql_log(ql_log_fatal, vha, 0x003b, |
456 | fmt: "Unable to allocate memory for request queue ptrs.\n" ); |
457 | goto fail_req_map; |
458 | } |
459 | |
460 | ha->rsp_q_map = kcalloc(n: ha->max_rsp_queues, size: sizeof(struct rsp_que *), |
461 | GFP_KERNEL); |
462 | if (!ha->rsp_q_map) { |
463 | ql_log(ql_log_fatal, vha, 0x003c, |
464 | fmt: "Unable to allocate memory for response queue ptrs.\n" ); |
465 | goto fail_rsp_map; |
466 | } |
467 | |
468 | ha->base_qpair = kzalloc(size: sizeof(struct qla_qpair), GFP_KERNEL); |
469 | if (ha->base_qpair == NULL) { |
470 | ql_log(ql_log_warn, vha, 0x00e0, |
471 | fmt: "Failed to allocate base queue pair memory.\n" ); |
472 | goto fail_base_qpair; |
473 | } |
474 | |
475 | qla_init_base_qpair(vha, req, rsp); |
476 | |
477 | if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { |
478 | ha->queue_pair_map = kcalloc(n: ha->max_qpairs, size: sizeof(struct qla_qpair *), |
479 | GFP_KERNEL); |
480 | if (!ha->queue_pair_map) { |
481 | ql_log(ql_log_fatal, vha, 0x0180, |
482 | fmt: "Unable to allocate memory for queue pair ptrs.\n" ); |
483 | goto fail_qpair_map; |
484 | } |
485 | if (qla_mapq_alloc_qp_cpu_map(ha) != 0) { |
486 | kfree(objp: ha->queue_pair_map); |
487 | ha->queue_pair_map = NULL; |
488 | goto fail_qpair_map; |
489 | } |
490 | } |
491 | |
492 | /* |
493 | * Make sure we record at least the request and response queue zero in |
494 | * case we need to free them if part of the probe fails. |
495 | */ |
496 | ha->rsp_q_map[0] = rsp; |
497 | ha->req_q_map[0] = req; |
498 | set_bit(nr: 0, addr: ha->rsp_qid_map); |
499 | set_bit(nr: 0, addr: ha->req_qid_map); |
500 | return 0; |
501 | |
502 | fail_qpair_map: |
503 | kfree(objp: ha->base_qpair); |
504 | ha->base_qpair = NULL; |
505 | fail_base_qpair: |
506 | kfree(objp: ha->rsp_q_map); |
507 | ha->rsp_q_map = NULL; |
508 | fail_rsp_map: |
509 | kfree(objp: ha->req_q_map); |
510 | ha->req_q_map = NULL; |
511 | fail_req_map: |
512 | return -ENOMEM; |
513 | } |
514 | |
515 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) |
516 | { |
517 | if (IS_QLAFX00(ha)) { |
518 | if (req && req->ring_fx00) |
519 | dma_free_coherent(dev: &ha->pdev->dev, |
520 | size: (req->length_fx00 + 1) * sizeof(request_t), |
521 | cpu_addr: req->ring_fx00, dma_handle: req->dma_fx00); |
522 | } else if (req && req->ring) |
523 | dma_free_coherent(dev: &ha->pdev->dev, |
524 | size: (req->length + 1) * sizeof(request_t), |
525 | cpu_addr: req->ring, dma_handle: req->dma); |
526 | |
527 | if (req) |
528 | kfree(objp: req->outstanding_cmds); |
529 | |
530 | kfree(objp: req); |
531 | } |
532 | |
533 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) |
534 | { |
535 | if (IS_QLAFX00(ha)) { |
536 | if (rsp && rsp->ring_fx00) |
537 | dma_free_coherent(dev: &ha->pdev->dev, |
538 | size: (rsp->length_fx00 + 1) * sizeof(request_t), |
539 | cpu_addr: rsp->ring_fx00, dma_handle: rsp->dma_fx00); |
540 | } else if (rsp && rsp->ring) { |
541 | dma_free_coherent(dev: &ha->pdev->dev, |
542 | size: (rsp->length + 1) * sizeof(response_t), |
543 | cpu_addr: rsp->ring, dma_handle: rsp->dma); |
544 | } |
545 | kfree(objp: rsp); |
546 | } |
547 | |
548 | static void qla2x00_free_queues(struct qla_hw_data *ha) |
549 | { |
550 | struct req_que *req; |
551 | struct rsp_que *rsp; |
552 | int cnt; |
553 | unsigned long flags; |
554 | |
555 | if (ha->queue_pair_map) { |
556 | kfree(objp: ha->queue_pair_map); |
557 | ha->queue_pair_map = NULL; |
558 | } |
559 | if (ha->base_qpair) { |
560 | kfree(objp: ha->base_qpair); |
561 | ha->base_qpair = NULL; |
562 | } |
563 | |
564 | qla_mapq_free_qp_cpu_map(ha); |
565 | spin_lock_irqsave(&ha->hardware_lock, flags); |
566 | for (cnt = 0; cnt < ha->max_req_queues; cnt++) { |
567 | if (!test_bit(cnt, ha->req_qid_map)) |
568 | continue; |
569 | |
570 | req = ha->req_q_map[cnt]; |
571 | clear_bit(nr: cnt, addr: ha->req_qid_map); |
572 | ha->req_q_map[cnt] = NULL; |
573 | |
574 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
575 | qla2x00_free_req_que(ha, req); |
576 | spin_lock_irqsave(&ha->hardware_lock, flags); |
577 | } |
578 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
579 | |
580 | kfree(objp: ha->req_q_map); |
581 | ha->req_q_map = NULL; |
582 | |
583 | |
584 | spin_lock_irqsave(&ha->hardware_lock, flags); |
585 | for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { |
586 | if (!test_bit(cnt, ha->rsp_qid_map)) |
587 | continue; |
588 | |
589 | rsp = ha->rsp_q_map[cnt]; |
590 | clear_bit(nr: cnt, addr: ha->rsp_qid_map); |
591 | ha->rsp_q_map[cnt] = NULL; |
592 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
593 | qla2x00_free_rsp_que(ha, rsp); |
594 | spin_lock_irqsave(&ha->hardware_lock, flags); |
595 | } |
596 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
597 | |
598 | kfree(objp: ha->rsp_q_map); |
599 | ha->rsp_q_map = NULL; |
600 | } |
601 | |
602 | static char * |
603 | qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) |
604 | { |
605 | struct qla_hw_data *ha = vha->hw; |
606 | static const char *const pci_bus_modes[] = { |
607 | "33" , "66" , "100" , "133" , |
608 | }; |
609 | uint16_t pci_bus; |
610 | |
611 | pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; |
612 | if (pci_bus) { |
613 | snprintf(buf: str, size: str_len, fmt: "PCI-X (%s MHz)" , |
614 | pci_bus_modes[pci_bus]); |
615 | } else { |
616 | pci_bus = (ha->pci_attr & BIT_8) >> 8; |
617 | snprintf(buf: str, size: str_len, fmt: "PCI (%s MHz)" , pci_bus_modes[pci_bus]); |
618 | } |
619 | |
620 | return str; |
621 | } |
622 | |
623 | static char * |
624 | qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) |
625 | { |
626 | static const char *const pci_bus_modes[] = { |
627 | "33" , "66" , "100" , "133" , |
628 | }; |
629 | struct qla_hw_data *ha = vha->hw; |
630 | uint32_t pci_bus; |
631 | |
632 | if (pci_is_pcie(dev: ha->pdev)) { |
633 | uint32_t lstat, lspeed, lwidth; |
634 | const char *speed_str; |
635 | |
636 | pcie_capability_read_dword(dev: ha->pdev, PCI_EXP_LNKCAP, val: &lstat); |
637 | lspeed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat); |
638 | lwidth = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat); |
639 | |
640 | switch (lspeed) { |
641 | case 1: |
642 | speed_str = "2.5GT/s" ; |
643 | break; |
644 | case 2: |
645 | speed_str = "5.0GT/s" ; |
646 | break; |
647 | case 3: |
648 | speed_str = "8.0GT/s" ; |
649 | break; |
650 | case 4: |
651 | speed_str = "16.0GT/s" ; |
652 | break; |
653 | default: |
654 | speed_str = "<unknown>" ; |
655 | break; |
656 | } |
657 | snprintf(buf: str, size: str_len, fmt: "PCIe (%s x%d)" , speed_str, lwidth); |
658 | |
659 | return str; |
660 | } |
661 | |
662 | pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; |
663 | if (pci_bus == 0 || pci_bus == 8) |
664 | snprintf(buf: str, size: str_len, fmt: "PCI (%s MHz)" , |
665 | pci_bus_modes[pci_bus >> 3]); |
666 | else |
667 | snprintf(buf: str, size: str_len, fmt: "PCI-X Mode %d (%s MHz)" , |
668 | pci_bus & 4 ? 2 : 1, |
669 | pci_bus_modes[pci_bus & 3]); |
670 | |
671 | return str; |
672 | } |
673 | |
674 | static char * |
675 | qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) |
676 | { |
677 | char un_str[10]; |
678 | struct qla_hw_data *ha = vha->hw; |
679 | |
680 | snprintf(buf: str, size, fmt: "%d.%02d.%02d " , ha->fw_major_version, |
681 | ha->fw_minor_version, ha->fw_subminor_version); |
682 | |
683 | if (ha->fw_attributes & BIT_9) { |
684 | strcat(p: str, q: "FLX" ); |
685 | return (str); |
686 | } |
687 | |
688 | switch (ha->fw_attributes & 0xFF) { |
689 | case 0x7: |
690 | strcat(p: str, q: "EF" ); |
691 | break; |
692 | case 0x17: |
693 | strcat(p: str, q: "TP" ); |
694 | break; |
695 | case 0x37: |
696 | strcat(p: str, q: "IP" ); |
697 | break; |
698 | case 0x77: |
699 | strcat(p: str, q: "VI" ); |
700 | break; |
701 | default: |
702 | sprintf(buf: un_str, fmt: "(%x)" , ha->fw_attributes); |
703 | strcat(p: str, q: un_str); |
704 | break; |
705 | } |
706 | if (ha->fw_attributes & 0x100) |
707 | strcat(p: str, q: "X" ); |
708 | |
709 | return (str); |
710 | } |
711 | |
712 | static char * |
713 | qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) |
714 | { |
715 | struct qla_hw_data *ha = vha->hw; |
716 | |
717 | snprintf(buf: str, size, fmt: "%d.%02d.%02d (%x)" , ha->fw_major_version, |
718 | ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); |
719 | return str; |
720 | } |
721 | |
722 | void qla2x00_sp_free_dma(srb_t *sp) |
723 | { |
724 | struct qla_hw_data *ha = sp->vha->hw; |
725 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
726 | |
727 | if (sp->flags & SRB_DMA_VALID) { |
728 | scsi_dma_unmap(cmd); |
729 | sp->flags &= ~SRB_DMA_VALID; |
730 | } |
731 | |
732 | if (sp->flags & SRB_CRC_PROT_DMA_VALID) { |
733 | dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), |
734 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); |
735 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; |
736 | } |
737 | |
738 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
739 | /* List assured to be having elements */ |
740 | qla2x00_clean_dsd_pool(ha, ctx: sp->u.scmd.crc_ctx); |
741 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
742 | } |
743 | |
744 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { |
745 | struct crc_context *ctx0 = sp->u.scmd.crc_ctx; |
746 | |
747 | dma_pool_free(pool: ha->dl_dma_pool, vaddr: ctx0, addr: ctx0->crc_ctx_dma); |
748 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; |
749 | } |
750 | |
751 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { |
752 | struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; |
753 | |
754 | dma_pool_free(pool: ha->fcp_cmnd_dma_pool, vaddr: ctx1->fcp_cmnd, |
755 | addr: ctx1->fcp_cmnd_dma); |
756 | list_splice(list: &ctx1->dsd_list, head: &sp->qpair->dsd_list); |
757 | sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; |
758 | sp->qpair->dsd_avail += ctx1->dsd_use_cnt; |
759 | } |
760 | |
761 | if (sp->flags & SRB_GOT_BUF) |
762 | qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); |
763 | } |
764 | |
765 | void qla2x00_sp_compl(srb_t *sp, int res) |
766 | { |
767 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
768 | struct completion *comp = sp->comp; |
769 | |
770 | /* kref: INIT */ |
771 | kref_put(kref: &sp->cmd_kref, release: qla2x00_sp_release); |
772 | cmd->result = res; |
773 | sp->type = 0; |
774 | scsi_done(cmd); |
775 | if (comp) |
776 | complete(comp); |
777 | } |
778 | |
779 | void qla2xxx_qpair_sp_free_dma(srb_t *sp) |
780 | { |
781 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
782 | struct qla_hw_data *ha = sp->fcport->vha->hw; |
783 | |
784 | if (sp->flags & SRB_DMA_VALID) { |
785 | scsi_dma_unmap(cmd); |
786 | sp->flags &= ~SRB_DMA_VALID; |
787 | } |
788 | |
789 | if (sp->flags & SRB_CRC_PROT_DMA_VALID) { |
790 | dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), |
791 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); |
792 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; |
793 | } |
794 | |
795 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
796 | /* List assured to be having elements */ |
797 | qla2x00_clean_dsd_pool(ha, ctx: sp->u.scmd.crc_ctx); |
798 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
799 | } |
800 | |
801 | if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { |
802 | struct crc_context *difctx = sp->u.scmd.crc_ctx; |
803 | struct dsd_dma *dif_dsd, *nxt_dsd; |
804 | |
805 | list_for_each_entry_safe(dif_dsd, nxt_dsd, |
806 | &difctx->ldif_dma_hndl_list, list) { |
807 | list_del(entry: &dif_dsd->list); |
808 | dma_pool_free(pool: ha->dif_bundl_pool, vaddr: dif_dsd->dsd_addr, |
809 | addr: dif_dsd->dsd_list_dma); |
810 | kfree(objp: dif_dsd); |
811 | difctx->no_dif_bundl--; |
812 | } |
813 | |
814 | list_for_each_entry_safe(dif_dsd, nxt_dsd, |
815 | &difctx->ldif_dsd_list, list) { |
816 | list_del(entry: &dif_dsd->list); |
817 | dma_pool_free(pool: ha->dl_dma_pool, vaddr: dif_dsd->dsd_addr, |
818 | addr: dif_dsd->dsd_list_dma); |
819 | kfree(objp: dif_dsd); |
820 | difctx->no_ldif_dsd--; |
821 | } |
822 | |
823 | if (difctx->no_ldif_dsd) { |
824 | ql_dbg(ql_dbg_tgt+ql_dbg_verbose, vha: sp->vha, 0xe022, |
825 | fmt: "%s: difctx->no_ldif_dsd=%x\n" , |
826 | __func__, difctx->no_ldif_dsd); |
827 | } |
828 | |
829 | if (difctx->no_dif_bundl) { |
830 | ql_dbg(ql_dbg_tgt+ql_dbg_verbose, vha: sp->vha, 0xe022, |
831 | fmt: "%s: difctx->no_dif_bundl=%x\n" , |
832 | __func__, difctx->no_dif_bundl); |
833 | } |
834 | sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; |
835 | } |
836 | |
837 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { |
838 | struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; |
839 | |
840 | dma_pool_free(pool: ha->fcp_cmnd_dma_pool, vaddr: ctx1->fcp_cmnd, |
841 | addr: ctx1->fcp_cmnd_dma); |
842 | list_splice(list: &ctx1->dsd_list, head: &sp->qpair->dsd_list); |
843 | sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; |
844 | sp->qpair->dsd_avail += ctx1->dsd_use_cnt; |
845 | sp->flags &= ~SRB_FCP_CMND_DMA_VALID; |
846 | } |
847 | |
848 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { |
849 | struct crc_context *ctx0 = sp->u.scmd.crc_ctx; |
850 | |
851 | dma_pool_free(pool: ha->dl_dma_pool, vaddr: ctx0, addr: ctx0->crc_ctx_dma); |
852 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; |
853 | } |
854 | |
855 | if (sp->flags & SRB_GOT_BUF) |
856 | qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); |
857 | } |
858 | |
859 | void qla2xxx_qpair_sp_compl(srb_t *sp, int res) |
860 | { |
861 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
862 | struct completion *comp = sp->comp; |
863 | |
864 | /* ref: INIT */ |
865 | kref_put(kref: &sp->cmd_kref, release: qla2x00_sp_release); |
866 | cmd->result = res; |
867 | sp->type = 0; |
868 | scsi_done(cmd); |
869 | if (comp) |
870 | complete(comp); |
871 | } |
872 | |
/*
 * qla2xxx_queuecommand - SCSI midlayer ->queuecommand entry point.
 * @host: SCSI host the command was queued against.
 * @cmd:  SCSI command to start.
 *
 * Validates adapter, rport and session state, then builds an SRB in the
 * command's private area and hands it to the firmware through
 * isp_ops->start_scsi(). With MQ enabled, dispatch is redirected to the
 * per-hardware-queue path (qla2xxx_mqueuecommand()).
 *
 * Return: 0 (command accepted, or completed with an error result via
 * scsi_done()) or SCSI_MLQUEUE_TARGET_BUSY to make the midlayer retry.
 */
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(shost: host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev);
	srb_t *sp;
	int rval;

	/* Driver unloading or remote port gone: fail the command fast. */
	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	/* MQ: route to the queue pair mapped to this request's hw queue. */
	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(rq: scsi_cmd_to_rq(scmd: cmd));
		hwq = blk_mq_unique_tag_to_hwq(unique_tag: tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	/* PCI error recovery (EEH) in progress. */
	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    fmt: "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n" , cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			/* Transient: ask the midlayer to requeue. */
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    fmt: "EEH_Busy, Requeuing the cmd=%p.\n" , cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    fmt: "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n" ,
		    cmd, rval);
		goto qc24_fail_command;
	}

	/* Reject DIF/DIX protection commands if support was not set up. */
	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(scmd: cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    fmt: "DIF Cap not reg, fail DIF capable cmd's:%p.\n" ,
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	/* Session missing or being torn down: have the midlayer retry. */
	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(v: &fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(v: &fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(v: &base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    fmt: "Returning DNC, fcport_state=%d loop_state=%d.\n" ,
			    atomic_read(v: &fcport->state),
			    atomic_read(v: &base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	/* ref: INIT */
	qla2xxx_init_sp(sp, vha, qpair: vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    fmt: "Start scsi failed rval=%d for cmd=%p.\n" , rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	/* ref: INIT */
	kref_put(kref: &sp->cmd_kref, release: qla2x00_sp_release);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	scsi_done(cmd);

	return 0;
}
994 | |
/*
 * qla2xxx_mqueuecommand - queue a SCSI command on a specific queue pair
 * (MQ-supported I/O path).
 * @host:  SCSI host the command was queued against.
 * @cmd:   SCSI command to start.
 * @qpair: hardware queue pair chosen from the request's blk-mq hw queue.
 *
 * Counterpart of qla2xxx_queuecommand() for MQ; called by it once the
 * hardware queue index has been mapped to a queue pair.
 *
 * Return: 0, or SCSI_MLQUEUE_TARGET_BUSY to make the midlayer retry.
 */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(shost: host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev);
	srb_t *sp;
	int rval;

	/* Missing rport means the target is already unreachable. */
	rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    fmt: "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n" ,
		    cmd, rval);
		goto qc24_fail_command;
	}

	/* Queue pair has been taken down (e.g. during error recovery). */
	if (!qpair->online) {
		ql_dbg(ql_dbg_io, vha, 0x3077,
		    fmt: "qpair not online. eeh_busy=%d.\n" , ha->flags.eeh_busy);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	/* Session missing or being torn down: have the midlayer retry. */
	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(v: &fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(v: &fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(v: &base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    fmt: "Returning DNC, fcport_state=%d loop_state=%d.\n" ,
			    atomic_read(v: &fcport->state),
			    atomic_read(v: &base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	/* ref: INIT */
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	/* Per-qpair variants of the free/done callbacks. */
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    fmt: "Start scsi failed rval=%d for cmd=%p.\n" , rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	/* ref: INIT */
	kref_put(kref: &sp->cmd_kref, release: qla2x00_sp_release);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	scsi_done(cmd);

	return 0;
}
1083 | |
1084 | /* |
1085 | * qla2x00_wait_for_hba_online |
1086 | * Wait till the HBA is online after going through |
1087 | * <= MAX_RETRIES_OF_ISP_ABORT or |
1088 | * finally HBA is disabled ie marked offline |
1089 | * |
1090 | * Input: |
1091 | * ha - pointer to host adapter structure |
1092 | * |
1093 | * Note: |
1094 | * Does context switching-Release SPIN_LOCK |
1095 | * (if any) before calling this routine. |
1096 | * |
1097 | * Return: |
1098 | * Success (Adapter is online) : 0 |
1099 | * Failed (Adapter is offline/disabled) : 1 |
1100 | */ |
1101 | int |
1102 | qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) |
1103 | { |
1104 | int return_status; |
1105 | unsigned long wait_online; |
1106 | struct qla_hw_data *ha = vha->hw; |
1107 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
1108 | |
1109 | wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); |
1110 | while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || |
1111 | test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || |
1112 | test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || |
1113 | ha->dpc_active) && time_before(jiffies, wait_online)) { |
1114 | |
1115 | msleep(msecs: 1000); |
1116 | } |
1117 | if (base_vha->flags.online) |
1118 | return_status = QLA_SUCCESS; |
1119 | else |
1120 | return_status = QLA_FUNCTION_FAILED; |
1121 | |
1122 | return (return_status); |
1123 | } |
1124 | |
1125 | static inline int test_fcport_count(scsi_qla_host_t *vha) |
1126 | { |
1127 | struct qla_hw_data *ha = vha->hw; |
1128 | unsigned long flags; |
1129 | int res; |
1130 | /* Return 0 = sleep, x=wake */ |
1131 | |
1132 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
1133 | ql_dbg(ql_dbg_init, vha, 0x00ec, |
1134 | fmt: "tgt %p, fcport_count=%d\n" , |
1135 | vha, vha->fcport_count); |
1136 | res = (vha->fcport_count == 0); |
1137 | if (res) { |
1138 | struct fc_port *fcport; |
1139 | |
1140 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
1141 | if (fcport->deleted != QLA_SESS_DELETED) { |
1142 | /* session(s) may not be fully logged in |
1143 | * (ie fcport_count=0), but session |
1144 | * deletion thread(s) may be inflight. |
1145 | */ |
1146 | |
1147 | res = 0; |
1148 | break; |
1149 | } |
1150 | } |
1151 | } |
1152 | spin_unlock_irqrestore(lock: &ha->tgt.sess_lock, flags); |
1153 | |
1154 | return res; |
1155 | } |
1156 | |
1157 | /* |
1158 | * qla2x00_wait_for_sess_deletion can only be called from remove_one. |
1159 | * it has dependency on UNLOADING flag to stop device discovery |
1160 | */ |
1161 | void |
1162 | qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) |
1163 | { |
1164 | u8 i; |
1165 | |
1166 | qla2x00_mark_all_devices_lost(vha); |
1167 | |
1168 | for (i = 0; i < 10; i++) { |
1169 | if (wait_event_timeout(vha->fcport_waitQ, |
1170 | test_fcport_count(vha), HZ) > 0) |
1171 | break; |
1172 | } |
1173 | |
1174 | flush_workqueue(vha->hw->wq); |
1175 | } |
1176 | |
1177 | /* |
1178 | * qla2x00_wait_for_hba_ready |
1179 | * Wait till the HBA is ready before doing driver unload |
1180 | * |
1181 | * Input: |
1182 | * ha - pointer to host adapter structure |
1183 | * |
1184 | * Note: |
1185 | * Does context switching-Release SPIN_LOCK |
1186 | * (if any) before calling this routine. |
1187 | * |
1188 | */ |
1189 | static void |
1190 | qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) |
1191 | { |
1192 | struct qla_hw_data *ha = vha->hw; |
1193 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
1194 | |
1195 | while ((qla2x00_reset_active(vha) || ha->dpc_active || |
1196 | ha->flags.mbox_busy) || |
1197 | test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || |
1198 | test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { |
1199 | if (test_bit(UNLOADING, &base_vha->dpc_flags)) |
1200 | break; |
1201 | msleep(msecs: 1000); |
1202 | } |
1203 | } |
1204 | |
1205 | int |
1206 | qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) |
1207 | { |
1208 | int return_status; |
1209 | unsigned long wait_reset; |
1210 | struct qla_hw_data *ha = vha->hw; |
1211 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
1212 | |
1213 | wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); |
1214 | while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || |
1215 | test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || |
1216 | test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || |
1217 | ha->dpc_active) && time_before(jiffies, wait_reset)) { |
1218 | |
1219 | msleep(msecs: 1000); |
1220 | |
1221 | if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && |
1222 | ha->flags.chip_reset_done) |
1223 | break; |
1224 | } |
1225 | if (ha->flags.chip_reset_done) |
1226 | return_status = QLA_SUCCESS; |
1227 | else |
1228 | return_status = QLA_FUNCTION_FAILED; |
1229 | |
1230 | return return_status; |
1231 | } |
1232 | |
1233 | /************************************************************************** |
1234 | * qla2xxx_eh_abort |
1235 | * |
1236 | * Description: |
1237 | * The abort function will abort the specified command. |
1238 | * |
1239 | * Input: |
1240 | * cmd = Linux SCSI command packet to be aborted. |
1241 | * |
1242 | * Returns: |
1243 | * Either SUCCESS or FAILED. |
1244 | * |
1245 | * Note: |
1246 | * Only return FAILED if command not returned by firmware. |
1247 | **************************************************************************/ |
1248 | static int |
1249 | qla2xxx_eh_abort(struct scsi_cmnd *cmd) |
1250 | { |
1251 | scsi_qla_host_t *vha = shost_priv(shost: cmd->device->host); |
1252 | DECLARE_COMPLETION_ONSTACK(comp); |
1253 | srb_t *sp; |
1254 | int ret; |
1255 | unsigned int id; |
1256 | uint64_t lun; |
1257 | int rval; |
1258 | struct qla_hw_data *ha = vha->hw; |
1259 | uint32_t ratov_j; |
1260 | struct qla_qpair *qpair; |
1261 | unsigned long flags; |
1262 | int fast_fail_status = SUCCESS; |
1263 | |
1264 | if (qla2x00_isp_reg_stat(ha)) { |
1265 | ql_log(ql_log_info, vha, 0x8042, |
1266 | fmt: "PCI/Register disconnect, exiting.\n" ); |
1267 | qla_pci_set_eeh_busy(vha); |
1268 | return FAILED; |
1269 | } |
1270 | |
1271 | /* Save any FAST_IO_FAIL value to return later if abort succeeds */ |
1272 | ret = fc_block_scsi_eh(cmnd: cmd); |
1273 | if (ret != 0) |
1274 | fast_fail_status = ret; |
1275 | |
1276 | sp = scsi_cmd_priv(cmd); |
1277 | qpair = sp->qpair; |
1278 | |
1279 | vha->cmd_timeout_cnt++; |
1280 | |
1281 | if ((sp->fcport && sp->fcport->deleted) || !qpair) |
1282 | return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; |
1283 | |
1284 | spin_lock_irqsave(qpair->qp_lock_ptr, flags); |
1285 | sp->comp = ∁ |
1286 | spin_unlock_irqrestore(lock: qpair->qp_lock_ptr, flags); |
1287 | |
1288 | |
1289 | id = cmd->device->id; |
1290 | lun = cmd->device->lun; |
1291 | |
1292 | ql_dbg(ql_dbg_taskm, vha, 0x8002, |
1293 | fmt: "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n" , |
1294 | vha->host_no, id, lun, sp, cmd, sp->handle); |
1295 | |
1296 | /* |
1297 | * Abort will release the original Command/sp from FW. Let the |
1298 | * original command call scsi_done. In return, he will wakeup |
1299 | * this sleeping thread. |
1300 | */ |
1301 | rval = ha->isp_ops->abort_command(sp); |
1302 | |
1303 | ql_dbg(ql_dbg_taskm, vha, 0x8003, |
1304 | fmt: "Abort command mbx cmd=%p, rval=%x.\n" , cmd, rval); |
1305 | |
1306 | /* Wait for the command completion. */ |
1307 | ratov_j = ha->r_a_tov/10 * 4 * 1000; |
1308 | ratov_j = msecs_to_jiffies(m: ratov_j); |
1309 | switch (rval) { |
1310 | case QLA_SUCCESS: |
1311 | if (!wait_for_completion_timeout(x: &comp, timeout: ratov_j)) { |
1312 | ql_dbg(ql_dbg_taskm, vha, 0xffff, |
1313 | fmt: "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n" , |
1314 | __func__, ha->r_a_tov/10); |
1315 | ret = FAILED; |
1316 | } else { |
1317 | ret = fast_fail_status; |
1318 | } |
1319 | break; |
1320 | default: |
1321 | ret = FAILED; |
1322 | break; |
1323 | } |
1324 | |
1325 | sp->comp = NULL; |
1326 | |
1327 | ql_log(ql_log_info, vha, 0x801c, |
1328 | fmt: "Abort command issued nexus=%ld:%d:%llu -- %x.\n" , |
1329 | vha->host_no, id, lun, ret); |
1330 | |
1331 | return ret; |
1332 | } |
1333 | |
/* Poll interval, in milliseconds, while waiting for an outstanding
 * command to clear from the outstanding_cmds array.
 */
#define ABORT_POLLING_PERIOD 1000
/* Number of poll iterations: 2 seconds total per wait pass. */
#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
1336 | |
1337 | /* |
1338 | * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. |
1339 | */ |
/*
 * Poll one queue pair until no outstanding SRB_SCSI_CMD matches the
 * requested nexus (@t = target d_id, @l = lun; scope selected by @type),
 * or until the iteration budget (ABORT_WAIT_ITER) is exhausted.
 */
static int
__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	scsi_qla_host_t *vha = qpair->vha;
	struct req_que *req = qpair->req;
	srb_t *sp;
	struct scsi_cmnd *cmd;
	unsigned long wait_iter = ABORT_WAIT_ITER;
	bool found;
	struct qla_hw_data *ha = vha->hw;

	status = QLA_SUCCESS;

	while (wait_iter--) {
		found = false;

		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
		/* Slot 0 of outstanding_cmds is never scanned here. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;
			if (sp->type != SRB_SCSI_CMD)
				continue;
			if (vha->vp_idx != sp->vha->vp_idx)
				continue;
			match = 0;
			cmd = GET_CMD_SP(sp);
			switch (type) {
			case WAIT_HOST:
				/* Any SCSI command on this host matches. */
				match = 1;
				break;
			case WAIT_TARGET:
				if (sp->fcport)
					match = sp->fcport->d_id.b24 == t;
				else
					match = 0;
				break;
			case WAIT_LUN:
				if (sp->fcport)
					match = (sp->fcport->d_id.b24 == t &&
						cmd->device->lun == l);
				else
					match = 0;
				break;
			}
			if (!match)
				continue;

			/* Drop the qpair lock before sleeping. */
			spin_unlock_irqrestore(lock: qpair->qp_lock_ptr, flags);

			/* PCI gone: further waiting cannot help. */
			if (unlikely(pci_channel_offline(ha->pdev)) ||
			    ha->flags.eeh_busy) {
				ql_dbg(ql_dbg_taskm, vha, 0x8005,
				    fmt: "Return:eh_wait.\n" );
				return status;
			}

			/*
			 * SRB_SCSI_CMD is still in the outstanding_cmds array.
			 * it means scsi_done has not called. Wait for it to
			 * clear from outstanding_cmds.
			 */
			msleep(ABORT_POLLING_PERIOD);
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			found = true;
		}
		spin_unlock_irqrestore(lock: qpair->qp_lock_ptr, flags);

		/* Clean pass with no matching command: queue has drained. */
		if (!found)
			break;
	}

	/* wait_iter is unsigned long; after the post-decrement loop runs
	 * to exhaustion it holds ULONG_MAX, which -1 converts to here.
	 */
	if (wait_iter == -1)
		status = QLA_FUNCTION_FAILED;

	return status;
}
1420 | |
1421 | int |
1422 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, |
1423 | uint64_t l, enum nexus_wait_type type) |
1424 | { |
1425 | struct qla_qpair *qpair; |
1426 | struct qla_hw_data *ha = vha->hw; |
1427 | int i, status = QLA_SUCCESS; |
1428 | |
1429 | status = __qla2x00_eh_wait_for_pending_commands(qpair: ha->base_qpair, t, l, |
1430 | type); |
1431 | for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) { |
1432 | qpair = ha->queue_pair_map[i]; |
1433 | if (!qpair) |
1434 | continue; |
1435 | status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l, |
1436 | type); |
1437 | } |
1438 | return status; |
1439 | } |
1440 | |
/*
 * Failure-stage names for the eh device/target reset handlers, indexed
 * by their "err" step counter (0 = wait-for-online failed, 2 = task
 * management function failed, 3 = waiting for command completions
 * failed). const: these literals must never be written through.
 */
static const char *const reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};
1447 | |
/*
 * qla2xxx_eh_device_reset - SCSI midlayer LUN-reset error handler.
 * @cmd: command that triggered the reset; identifies the nexus.
 *
 * Waits for the HBA to come online, issues a LUN reset task management
 * function via isp_ops->lun_reset(), then waits for all commands
 * outstanding on that LUN to drain back to the midlayer.
 *
 * Return: SUCCESS, FAILED, or a blocking status from fc_block_rport().
 */
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	scsi_qla_host_t *vha = shost_priv(shost: sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int err;

	/* Adapter registers unreadable: PCI disconnect, flag EEH. */
	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    fmt: "PCI/Register disconnect, exiting.\n" );
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	/* Session going away: no point resetting through it. */
	if (fcport->deleted)
		return FAILED;

	ql_log(ql_log_info, vha, 0x8009,
	    fmt: "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n" , vha->host_no,
	    sdev->id, sdev->lun, cmd);

	/* err indexes reset_errors[] on failure (which step failed). */
	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    fmt: "Wait for hba online failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
		!= QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    fmt: "do_reset failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, t: fcport->d_id.b24,
		l: cmd->device->lun,
		type: WAIT_LUN) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    fmt: "wait for pending cmds failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    fmt: "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n" ,
	    vha->host_no, sdev->id, sdev->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    fmt: "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n" ,
	    reset_errors[err], vha->host_no, sdev->id, sdev->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}
1516 | |
/*
 * qla2xxx_eh_target_reset - SCSI midlayer target-reset error handler.
 * @cmd: command that triggered the reset; identifies the target.
 *
 * Waits for the HBA to come online, issues a target reset task
 * management function via isp_ops->target_reset(), then waits for all
 * commands outstanding on that target to drain back to the midlayer.
 *
 * Return: SUCCESS, FAILED, or a blocking status from fc_block_rport().
 */
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
	int err;

	/* Adapter registers unreadable: PCI disconnect, flag EEH. */
	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    fmt: "PCI/Register disconnect, exiting.\n" );
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	/* Session going away: no point resetting through it. */
	if (fcport->deleted)
		return FAILED;

	ql_log(ql_log_info, vha, 0x8009,
	    fmt: "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n" , vha->host_no,
	    sdev->id, cmd);

	/* err indexes reset_errors[] on failure (which step failed). */
	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    fmt: "Wait for hba online failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    fmt: "target_reset failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, t: fcport->d_id.b24, l: 0,
		type: WAIT_TARGET) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    fmt: "wait for pending cmds failed for cmd=%p.\n" , cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    fmt: "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n" ,
	    vha->host_no, sdev->id, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    fmt: "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n" ,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	vha->reset_cmd_err_cnt++;
	return FAILED;
}
1583 | |
1584 | /************************************************************************** |
1585 | * qla2xxx_eh_bus_reset |
1586 | * |
1587 | * Description: |
1588 | * The bus reset function will reset the bus and abort any executing |
1589 | * commands. |
1590 | * |
1591 | * Input: |
1592 | * cmd = Linux SCSI command packet of the command that cause the |
1593 | * bus reset. |
1594 | * |
1595 | * Returns: |
1596 | * SUCCESS/FAILURE (defined as macro in scsi.h). |
1597 | * |
1598 | **************************************************************************/ |
1599 | static int |
1600 | qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) |
1601 | { |
1602 | scsi_qla_host_t *vha = shost_priv(shost: cmd->device->host); |
1603 | int ret = FAILED; |
1604 | unsigned int id; |
1605 | uint64_t lun; |
1606 | struct qla_hw_data *ha = vha->hw; |
1607 | |
1608 | if (qla2x00_isp_reg_stat(ha)) { |
1609 | ql_log(ql_log_info, vha, 0x8040, |
1610 | fmt: "PCI/Register disconnect, exiting.\n" ); |
1611 | qla_pci_set_eeh_busy(vha); |
1612 | return FAILED; |
1613 | } |
1614 | |
1615 | id = cmd->device->id; |
1616 | lun = cmd->device->lun; |
1617 | |
1618 | if (qla2x00_chip_is_down(vha)) |
1619 | return ret; |
1620 | |
1621 | ql_log(ql_log_info, vha, 0x8012, |
1622 | fmt: "BUS RESET ISSUED nexus=%ld:%d:%llu.\n" , vha->host_no, id, lun); |
1623 | |
1624 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { |
1625 | ql_log(ql_log_fatal, vha, 0x8013, |
1626 | fmt: "Wait for hba online failed board disabled.\n" ); |
1627 | goto eh_bus_reset_done; |
1628 | } |
1629 | |
1630 | if (qla2x00_loop_reset(vha) == QLA_SUCCESS) |
1631 | ret = SUCCESS; |
1632 | |
1633 | if (ret == FAILED) |
1634 | goto eh_bus_reset_done; |
1635 | |
1636 | /* Flush outstanding commands. */ |
1637 | if (qla2x00_eh_wait_for_pending_commands(vha, t: 0, l: 0, type: WAIT_HOST) != |
1638 | QLA_SUCCESS) { |
1639 | ql_log(ql_log_warn, vha, 0x8014, |
1640 | fmt: "Wait for pending commands failed.\n" ); |
1641 | ret = FAILED; |
1642 | } |
1643 | |
1644 | eh_bus_reset_done: |
1645 | ql_log(ql_log_warn, vha, 0x802b, |
1646 | fmt: "BUS RESET %s nexus=%ld:%d:%llu.\n" , |
1647 | (ret == FAILED) ? "FAILED" : "SUCCEEDED" , vha->host_no, id, lun); |
1648 | |
1649 | return ret; |
1650 | } |
1651 | |
1652 | /************************************************************************** |
1653 | * qla2xxx_eh_host_reset |
1654 | * |
1655 | * Description: |
1656 | * The reset function will reset the Adapter. |
1657 | * |
1658 | * Input: |
1659 | * cmd = Linux SCSI command packet of the command that cause the |
1660 | * adapter reset. |
1661 | * |
1662 | * Returns: |
1663 | * Either SUCCESS or FAILED. |
1664 | * |
1665 | * Note: |
1666 | **************************************************************************/ |
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(shost: cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    fmt: "PCI/Register disconnect, exiting.\n" );
		qla_pci_set_eeh_busy(vha);
		/* NOTE(review): returns SUCCESS (unlike the other eh
		 * handlers' FAILED) after marking EEH busy — presumably
		 * to stop midlayer escalation while PCI recovery runs;
		 * confirm against the EEH recovery design.
		 */
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    fmt: "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n" , vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active. Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		/* Virtual port: abort only this vport's ISP state. */
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			/* P3P parts try a lighter FCoE context reset first. */
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		/* Full ISP abort on the physical port. */
		set_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, addr: &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    fmt: "wait for hba online failed.\n" );
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, t: 0, l: 0, type: WAIT_HOST) ==
		QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    fmt: "ADAPTER RESET %s nexus=%ld:%d:%llu.\n" ,
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED" , vha->host_no, id, lun);

	return ret;
}
1739 | |
1740 | /* |
1741 | * qla2x00_loop_reset |
1742 | * Issue loop reset. |
1743 | * |
1744 | * Input: |
1745 | * ha = adapter block pointer. |
1746 | * |
1747 | * Returns: |
1748 | * 0 = success |
1749 | */ |
1750 | int |
1751 | qla2x00_loop_reset(scsi_qla_host_t *vha) |
1752 | { |
1753 | int ret; |
1754 | struct qla_hw_data *ha = vha->hw; |
1755 | |
1756 | if (IS_QLAFX00(ha)) |
1757 | return QLA_SUCCESS; |
1758 | |
1759 | if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { |
1760 | atomic_set(v: &vha->loop_state, LOOP_DOWN); |
1761 | atomic_set(v: &vha->loop_down_timer, LOOP_DOWN_TIME); |
1762 | qla2x00_mark_all_devices_lost(vha); |
1763 | ret = qla2x00_full_login_lip(ha: vha); |
1764 | if (ret != QLA_SUCCESS) { |
1765 | ql_dbg(ql_dbg_taskm, vha, 0x802d, |
1766 | fmt: "full_login_lip=%d.\n" , ret); |
1767 | } |
1768 | } |
1769 | |
1770 | if (ha->flags.enable_lip_reset) { |
1771 | ret = qla2x00_lip_reset(vha); |
1772 | if (ret != QLA_SUCCESS) |
1773 | ql_dbg(ql_dbg_taskm, vha, 0x802e, |
1774 | fmt: "lip_reset failed (%d).\n" , ret); |
1775 | } |
1776 | |
1777 | /* Issue marker command only when we are going to start the I/O */ |
1778 | vha->marker_needed = 1; |
1779 | |
1780 | return QLA_SUCCESS; |
1781 | } |
1782 | |
1783 | /* |
1784 | * The caller must ensure that no completion interrupts will happen |
1785 | * while this function is in progress. |
1786 | */ |
1787 | static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, |
1788 | unsigned long *flags) |
1789 | __releases(qp->qp_lock_ptr) |
1790 | __acquires(qp->qp_lock_ptr) |
1791 | { |
1792 | DECLARE_COMPLETION_ONSTACK(comp); |
1793 | scsi_qla_host_t *vha = qp->vha; |
1794 | struct qla_hw_data *ha = vha->hw; |
1795 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
1796 | int rval; |
1797 | bool ret_cmd; |
1798 | uint32_t ratov_j; |
1799 | |
1800 | lockdep_assert_held(qp->qp_lock_ptr); |
1801 | |
1802 | if (qla2x00_chip_is_down(vha)) { |
1803 | sp->done(sp, res); |
1804 | return; |
1805 | } |
1806 | |
1807 | if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || |
1808 | (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && |
1809 | !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && |
1810 | !qla2x00_isp_reg_stat(ha))) { |
1811 | if (sp->comp) { |
1812 | sp->done(sp, res); |
1813 | return; |
1814 | } |
1815 | |
1816 | sp->comp = ∁ |
1817 | spin_unlock_irqrestore(lock: qp->qp_lock_ptr, flags: *flags); |
1818 | |
1819 | rval = ha->isp_ops->abort_command(sp); |
1820 | /* Wait for command completion. */ |
1821 | ret_cmd = false; |
1822 | ratov_j = ha->r_a_tov/10 * 4 * 1000; |
1823 | ratov_j = msecs_to_jiffies(m: ratov_j); |
1824 | switch (rval) { |
1825 | case QLA_SUCCESS: |
1826 | if (wait_for_completion_timeout(x: &comp, timeout: ratov_j)) { |
1827 | ql_dbg(ql_dbg_taskm, vha, 0xffff, |
1828 | fmt: "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n" , |
1829 | __func__, ha->r_a_tov/10); |
1830 | ret_cmd = true; |
1831 | } |
1832 | /* else FW return SP to driver */ |
1833 | break; |
1834 | default: |
1835 | ret_cmd = true; |
1836 | break; |
1837 | } |
1838 | |
1839 | spin_lock_irqsave(qp->qp_lock_ptr, *flags); |
1840 | if (ret_cmd && blk_mq_request_started(rq: scsi_cmd_to_rq(scmd: cmd))) |
1841 | sp->done(sp, res); |
1842 | } else { |
1843 | sp->done(sp, res); |
1844 | } |
1845 | } |
1846 | |
1847 | /* |
1848 | * The caller must ensure that no completion interrupts will happen |
1849 | * while this function is in progress. |
1850 | */ |
/*
 * Walk the outstanding_cmds array of @qp and abort/complete every
 * command with result @res. SRBs go through qla2x00_abort_srb() (which
 * may drop and retake the qpair lock); target-mode commands are only
 * flagged aborted; TMFs are skipped.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	/* Request queues never mapped: nothing outstanding to abort. */
	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			/*
			 * perform lockless completion during driver unload
			 */
			if (qla2x00_chip_is_down(vha)) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(lock: qp->qp_lock_ptr, flags);
				sp->done(sp, res);
				spin_lock_irqsave(qp->qp_lock_ptr, flags);
				continue;
			}

			switch (sp->cmd_type) {
			case TYPE_SRB:
				/* May drop/retake the qpair lock inside. */
				qla2x00_abort_srb(qp, sp, res, flags: &flags);
				break;
			case TYPE_TGT_CMD:
				/* Target mode off: leave the slot as-is. */
				if (!vha->hw->tgt.tgt_ops || !tgt ||
					qla_ini_mode_enabled(ha: vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    fmt: "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n" ,
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(lock: qp->qp_lock_ptr, flags);
}
1907 | |
1908 | /* |
1909 | * The caller must ensure that no completion interrupts will happen |
1910 | * while this function is in progress. |
1911 | */ |
1912 | void |
1913 | qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) |
1914 | { |
1915 | int que; |
1916 | struct qla_hw_data *ha = vha->hw; |
1917 | |
1918 | /* Continue only if initialization complete. */ |
1919 | if (!ha->base_qpair) |
1920 | return; |
1921 | __qla2x00_abort_all_cmds(qp: ha->base_qpair, res); |
1922 | |
1923 | if (!ha->queue_pair_map) |
1924 | return; |
1925 | for (que = 0; que < ha->max_qpairs; que++) { |
1926 | if (!ha->queue_pair_map[que]) |
1927 | continue; |
1928 | |
1929 | __qla2x00_abort_all_cmds(qp: ha->queue_pair_map[que], res); |
1930 | } |
1931 | } |
1932 | |
1933 | static int |
1934 | qla2xxx_slave_alloc(struct scsi_device *sdev) |
1935 | { |
1936 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
1937 | |
1938 | if (!rport || fc_remote_port_chkready(rport)) |
1939 | return -ENXIO; |
1940 | |
1941 | sdev->hostdata = *(fc_port_t **)rport->dd_data; |
1942 | |
1943 | return 0; |
1944 | } |
1945 | |
1946 | static int |
1947 | qla2xxx_slave_configure(struct scsi_device *sdev) |
1948 | { |
1949 | scsi_qla_host_t *vha = shost_priv(shost: sdev->host); |
1950 | struct req_que *req = vha->req; |
1951 | |
1952 | if (IS_T10_PI_CAPABLE(vha->hw)) |
1953 | blk_queue_update_dma_alignment(sdev->request_queue, 0x7); |
1954 | |
1955 | scsi_change_queue_depth(sdev, req->max_q_depth); |
1956 | return 0; |
1957 | } |
1958 | |
static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	/* Drop the fc_port pointer cached by qla2xxx_slave_alloc(). */
	sdev->hostdata = NULL;
}
1964 | |
1965 | /** |
1966 | * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. |
1967 | * @ha: HA context |
1968 | * |
1969 | * At exit, the @ha's flags.enable_64bit_addressing set to indicated |
1970 | * supported addressing method. |
1971 | */ |
1972 | static void |
1973 | qla2x00_config_dma_addressing(struct qla_hw_data *ha) |
1974 | { |
1975 | /* Assume a 32bit DMA mask. */ |
1976 | ha->flags.enable_64bit_addressing = 0; |
1977 | |
1978 | if (!dma_set_mask(dev: &ha->pdev->dev, DMA_BIT_MASK(64))) { |
1979 | /* Any upper-dword bits set? */ |
1980 | if (MSD(dma_get_required_mask(&ha->pdev->dev)) && |
1981 | !dma_set_coherent_mask(dev: &ha->pdev->dev, DMA_BIT_MASK(64))) { |
1982 | /* Ok, a 64bit DMA mask is applicable. */ |
1983 | ha->flags.enable_64bit_addressing = 1; |
1984 | ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; |
1985 | ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; |
1986 | return; |
1987 | } |
1988 | } |
1989 | |
1990 | dma_set_mask(dev: &ha->pdev->dev, DMA_BIT_MASK(32)); |
1991 | dma_set_coherent_mask(dev: &ha->pdev->dev, DMA_BIT_MASK(32)); |
1992 | } |
1993 | |
1994 | static void |
1995 | qla2x00_enable_intrs(struct qla_hw_data *ha) |
1996 | { |
1997 | unsigned long flags = 0; |
1998 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
1999 | |
2000 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2001 | ha->interrupts_on = 1; |
2002 | /* enable risc and host interrupts */ |
2003 | wrt_reg_word(addr: ®->ictrl, ICR_EN_INT | ICR_EN_RISC); |
2004 | rd_reg_word(addr: ®->ictrl); |
2005 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
2006 | |
2007 | } |
2008 | |
2009 | static void |
2010 | qla2x00_disable_intrs(struct qla_hw_data *ha) |
2011 | { |
2012 | unsigned long flags = 0; |
2013 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
2014 | |
2015 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2016 | ha->interrupts_on = 0; |
2017 | /* disable risc and host interrupts */ |
2018 | wrt_reg_word(addr: ®->ictrl, data: 0); |
2019 | rd_reg_word(addr: ®->ictrl); |
2020 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
2021 | } |
2022 | |
2023 | static void |
2024 | qla24xx_enable_intrs(struct qla_hw_data *ha) |
2025 | { |
2026 | unsigned long flags = 0; |
2027 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2028 | |
2029 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2030 | ha->interrupts_on = 1; |
2031 | wrt_reg_dword(addr: ®->ictrl, ICRX_EN_RISC_INT); |
2032 | rd_reg_dword(addr: ®->ictrl); |
2033 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
2034 | } |
2035 | |
2036 | static void |
2037 | qla24xx_disable_intrs(struct qla_hw_data *ha) |
2038 | { |
2039 | unsigned long flags = 0; |
2040 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2041 | |
2042 | if (IS_NOPOLLING_TYPE(ha)) |
2043 | return; |
2044 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2045 | ha->interrupts_on = 0; |
2046 | wrt_reg_dword(addr: ®->ictrl, data: 0); |
2047 | rd_reg_dword(addr: ®->ictrl); |
2048 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
2049 | } |
2050 | |
2051 | static int |
2052 | qla2x00_iospace_config(struct qla_hw_data *ha) |
2053 | { |
2054 | resource_size_t pio; |
2055 | uint16_t msix; |
2056 | |
2057 | if (pci_request_selected_regions(ha->pdev, ha->bars, |
2058 | QLA2XXX_DRIVER_NAME)) { |
2059 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0011, |
2060 | fmt: "Failed to reserve PIO/MMIO regions (%s), aborting.\n" , |
2061 | pci_name(pdev: ha->pdev)); |
2062 | goto iospace_error_exit; |
2063 | } |
2064 | if (!(ha->bars & 1)) |
2065 | goto skip_pio; |
2066 | |
2067 | /* We only need PIO for Flash operations on ISP2312 v2 chips. */ |
2068 | pio = pci_resource_start(ha->pdev, 0); |
2069 | if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { |
2070 | if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { |
2071 | ql_log_pci(ql_log_warn, pdev: ha->pdev, 0x0012, |
2072 | fmt: "Invalid pci I/O region size (%s).\n" , |
2073 | pci_name(pdev: ha->pdev)); |
2074 | pio = 0; |
2075 | } |
2076 | } else { |
2077 | ql_log_pci(ql_log_warn, pdev: ha->pdev, 0x0013, |
2078 | fmt: "Region #0 no a PIO resource (%s).\n" , |
2079 | pci_name(pdev: ha->pdev)); |
2080 | pio = 0; |
2081 | } |
2082 | ha->pio_address = pio; |
2083 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0014, |
2084 | fmt: "PIO address=%llu.\n" , |
2085 | (unsigned long long)ha->pio_address); |
2086 | |
2087 | skip_pio: |
2088 | /* Use MMIO operations for all accesses. */ |
2089 | if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { |
2090 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0015, |
2091 | fmt: "Region #1 not an MMIO resource (%s), aborting.\n" , |
2092 | pci_name(pdev: ha->pdev)); |
2093 | goto iospace_error_exit; |
2094 | } |
2095 | if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { |
2096 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0016, |
2097 | fmt: "Invalid PCI mem region size (%s), aborting.\n" , |
2098 | pci_name(pdev: ha->pdev)); |
2099 | goto iospace_error_exit; |
2100 | } |
2101 | |
2102 | ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); |
2103 | if (!ha->iobase) { |
2104 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0017, |
2105 | fmt: "Cannot remap MMIO (%s), aborting.\n" , |
2106 | pci_name(pdev: ha->pdev)); |
2107 | goto iospace_error_exit; |
2108 | } |
2109 | |
2110 | /* Determine queue resources */ |
2111 | ha->max_req_queues = ha->max_rsp_queues = 1; |
2112 | ha->msix_count = QLA_BASE_VECTORS; |
2113 | |
2114 | /* Check if FW supports MQ or not */ |
2115 | if (!(ha->fw_attributes & BIT_6)) |
2116 | goto mqiobase_exit; |
2117 | |
2118 | if (!ql2xmqsupport || !ql2xnvmeenable || |
2119 | (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) |
2120 | goto mqiobase_exit; |
2121 | |
2122 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), |
2123 | pci_resource_len(ha->pdev, 3)); |
2124 | if (ha->mqiobase) { |
2125 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0018, |
2126 | fmt: "MQIO Base=%p.\n" , ha->mqiobase); |
2127 | /* Read MSIX vector size of the board */ |
2128 | pci_read_config_word(dev: ha->pdev, QLA_PCI_MSIX_CONTROL, val: &msix); |
2129 | ha->msix_count = msix + 1; |
2130 | /* Max queues are bounded by available msix vectors */ |
2131 | /* MB interrupt uses 1 vector */ |
2132 | ha->max_req_queues = ha->msix_count - 1; |
2133 | ha->max_rsp_queues = ha->max_req_queues; |
2134 | /* Queue pairs is the max value minus the base queue pair */ |
2135 | ha->max_qpairs = ha->max_rsp_queues - 1; |
2136 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0188, |
2137 | fmt: "Max no of queues pairs: %d.\n" , ha->max_qpairs); |
2138 | |
2139 | ql_log_pci(ql_log_info, pdev: ha->pdev, 0x001a, |
2140 | fmt: "MSI-X vector count: %d.\n" , ha->msix_count); |
2141 | } else |
2142 | ql_log_pci(ql_log_info, pdev: ha->pdev, 0x001b, |
2143 | fmt: "BAR 3 not enabled.\n" ); |
2144 | |
2145 | mqiobase_exit: |
2146 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x001c, |
2147 | fmt: "MSIX Count: %d.\n" , ha->msix_count); |
2148 | return (0); |
2149 | |
2150 | iospace_error_exit: |
2151 | return (-ENOMEM); |
2152 | } |
2153 | |
2154 | |
2155 | static int |
2156 | qla83xx_iospace_config(struct qla_hw_data *ha) |
2157 | { |
2158 | uint16_t msix; |
2159 | |
2160 | if (pci_request_selected_regions(ha->pdev, ha->bars, |
2161 | QLA2XXX_DRIVER_NAME)) { |
2162 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0117, |
2163 | fmt: "Failed to reserve PIO/MMIO regions (%s), aborting.\n" , |
2164 | pci_name(pdev: ha->pdev)); |
2165 | |
2166 | goto iospace_error_exit; |
2167 | } |
2168 | |
2169 | /* Use MMIO operations for all accesses. */ |
2170 | if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { |
2171 | ql_log_pci(ql_log_warn, pdev: ha->pdev, 0x0118, |
2172 | fmt: "Invalid pci I/O region size (%s).\n" , |
2173 | pci_name(pdev: ha->pdev)); |
2174 | goto iospace_error_exit; |
2175 | } |
2176 | if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { |
2177 | ql_log_pci(ql_log_warn, pdev: ha->pdev, 0x0119, |
2178 | fmt: "Invalid PCI mem region size (%s), aborting\n" , |
2179 | pci_name(pdev: ha->pdev)); |
2180 | goto iospace_error_exit; |
2181 | } |
2182 | |
2183 | ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); |
2184 | if (!ha->iobase) { |
2185 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x011a, |
2186 | fmt: "Cannot remap MMIO (%s), aborting.\n" , |
2187 | pci_name(pdev: ha->pdev)); |
2188 | goto iospace_error_exit; |
2189 | } |
2190 | |
2191 | /* 64bit PCI BAR - BAR2 will correspoond to region 4 */ |
2192 | /* 83XX 26XX always use MQ type access for queues |
2193 | * - mbar 2, a.k.a region 4 */ |
2194 | ha->max_req_queues = ha->max_rsp_queues = 1; |
2195 | ha->msix_count = QLA_BASE_VECTORS; |
2196 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), |
2197 | pci_resource_len(ha->pdev, 4)); |
2198 | |
2199 | if (!ha->mqiobase) { |
2200 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x011d, |
2201 | fmt: "BAR2/region4 not enabled\n" ); |
2202 | goto mqiobase_exit; |
2203 | } |
2204 | |
2205 | ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), |
2206 | pci_resource_len(ha->pdev, 2)); |
2207 | if (ha->msixbase) { |
2208 | /* Read MSIX vector size of the board */ |
2209 | pci_read_config_word(dev: ha->pdev, |
2210 | QLA_83XX_PCI_MSIX_CONTROL, val: &msix); |
2211 | ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; |
2212 | /* |
2213 | * By default, driver uses at least two msix vectors |
2214 | * (default & rspq) |
2215 | */ |
2216 | if (ql2xmqsupport || ql2xnvmeenable) { |
2217 | /* MB interrupt uses 1 vector */ |
2218 | ha->max_req_queues = ha->msix_count - 1; |
2219 | |
2220 | /* ATIOQ needs 1 vector. That's 1 less QPair */ |
2221 | if (QLA_TGT_MODE_ENABLED()) |
2222 | ha->max_req_queues--; |
2223 | |
2224 | ha->max_rsp_queues = ha->max_req_queues; |
2225 | |
2226 | /* Queue pairs is the max value minus |
2227 | * the base queue pair */ |
2228 | ha->max_qpairs = ha->max_req_queues - 1; |
2229 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x00e3, |
2230 | fmt: "Max no of queues pairs: %d.\n" , ha->max_qpairs); |
2231 | } |
2232 | ql_log_pci(ql_log_info, pdev: ha->pdev, 0x011c, |
2233 | fmt: "MSI-X vector count: %d.\n" , ha->msix_count); |
2234 | } else |
2235 | ql_log_pci(ql_log_info, pdev: ha->pdev, 0x011e, |
2236 | fmt: "BAR 1 not enabled.\n" ); |
2237 | |
2238 | mqiobase_exit: |
2239 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x011f, |
2240 | fmt: "MSIX Count: %d.\n" , ha->msix_count); |
2241 | return 0; |
2242 | |
2243 | iospace_error_exit: |
2244 | return -ENOMEM; |
2245 | } |
2246 | |
/* Chip-specific dispatch table for ISP2100-class adapters (32-bit IOCBs,
 * no beacon support). */
static struct isp_operations qla2100_isp_ops = {
	.pci_config = qla2100_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2100_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2100_fw_dump,
	.beacon_on = NULL,
	.beacon_off = NULL,
	.beacon_blink = NULL,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2285 | |
/* Dispatch table for ISP23xx-class adapters; like qla2100_isp_ops but with
 * its own PCI config/interrupt handler and beacon (LED) support. */
static struct isp_operations qla2300_isp_ops = {
	.pci_config = qla2300_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2300_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2300_fw_dump,
	.beacon_on = qla2x00_beacon_on,
	.beacon_off = qla2x00_beacon_off,
	.beacon_blink = qla2x00_beacon_blink,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2324 | |
/* Dispatch table for ISP24xx-class adapters using the qla24xx_* routine
 * family; calc_req_entries/build_iocbs are unused (NULL) here. */
static struct isp_operations qla24xx_isp_ops = {
	.pci_config = qla24xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla24xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2363 | |
/* Dispatch table for ISP25xx-class adapters; differs from qla24xx_isp_ops
 * in NVRAM/optrom access and in using the DIF-aware start_scsi routines,
 * including a multiqueue variant. */
static struct isp_operations qla25xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla25xx_read_nvram_data,
	.write_nvram = qla25xx_write_nvram_data,
	.fw_dump = qla25xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2402 | |
/* Dispatch table for ISP81xx-class adapters; no direct NVRAM read/write
 * hooks (NULL) and 81xx-specific nvram_config/load_risc/fw_dump. */
static struct isp_operations qla81xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla81xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2441 | |
/* Dispatch table for ISP82xx-class adapters, which use the qla82xx_*
 * routines for PCI config, reset, interrupts, flash and SCSI start. */
static struct isp_operations qla82xx_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla82xx_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla82xx_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla82xx_read_optrom_data,
	.write_optrom = qla82xx_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla82xx_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2480 | |
/* Dispatch table for ISP8044 adapters; based on the qla82xx_* routines but
 * with 8044-specific interrupt handler, fw dump, optrom access and
 * abort_isp, and no direct NVRAM hooks (NULL). */
static struct isp_operations qla8044_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla8044_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla8044_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla8044_read_optrom_data,
	.write_optrom = qla8044_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla8044_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2519 | |
/* Dispatch table for ISP83xx-class adapters; like qla81xx_isp_ops but with
 * 83xx fw_dump and the 83xx iospace_config (MQ/MSI-X BARs). */
static struct isp_operations qla83xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla83xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2558 | |
/* Dispatch table for ISPFx00 adapters; many hooks are NULL because these
 * parts have no RISC load, NVRAM config, fabric login/logout or fw dump
 * via this interface. */
static struct isp_operations qlafx00_isp_ops = {
	.pci_config = qlafx00_pci_config,
	.reset_chip = qlafx00_soft_reset,
	.chip_diag = qlafx00_chip_diag,
	.config_rings = qlafx00_config_rings,
	.reset_adapter = qlafx00_soft_reset,
	.nvram_config = NULL,
	.update_fw_options = NULL,
	.load_risc = NULL,
	.pci_info_str = qlafx00_pci_info_str,
	.fw_version_str = qlafx00_fw_version_str,
	.intr_handler = qlafx00_intr_handler,
	.enable_intrs = qlafx00_enable_intrs,
	.disable_intrs = qlafx00_disable_intrs,
	.abort_command = qla24xx_async_abort_command,
	.target_reset = qlafx00_abort_target,
	.lun_reset = qlafx00_lun_reset,
	.fabric_login = NULL,
	.fabric_logout = NULL,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = NULL,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qlafx00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qlafx00_abort_isp,
	.iospace_config = qlafx00_iospace_config,
	.initialize_adapter = qlafx00_initialize_adapter,
};
2597 | |
/* Dispatch table for ISP27xx-class adapters; the only table here that also
 * provides an MPI firmware dump hook (mpi_fw_dump). */
static struct isp_operations qla27xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla27xx_fwdump,
	.mpi_fw_dump = qla27xx_mpi_fwdump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
2637 | |
2638 | static inline void |
2639 | qla2x00_set_isp_flags(struct qla_hw_data *ha) |
2640 | { |
2641 | ha->device_type = DT_EXTENDED_IDS; |
2642 | switch (ha->pdev->device) { |
2643 | case PCI_DEVICE_ID_QLOGIC_ISP2100: |
2644 | ha->isp_type |= DT_ISP2100; |
2645 | ha->device_type &= ~DT_EXTENDED_IDS; |
2646 | ha->fw_srisc_address = RISC_START_ADDRESS_2100; |
2647 | break; |
2648 | case PCI_DEVICE_ID_QLOGIC_ISP2200: |
2649 | ha->isp_type |= DT_ISP2200; |
2650 | ha->device_type &= ~DT_EXTENDED_IDS; |
2651 | ha->fw_srisc_address = RISC_START_ADDRESS_2100; |
2652 | break; |
2653 | case PCI_DEVICE_ID_QLOGIC_ISP2300: |
2654 | ha->isp_type |= DT_ISP2300; |
2655 | ha->device_type |= DT_ZIO_SUPPORTED; |
2656 | ha->fw_srisc_address = RISC_START_ADDRESS_2300; |
2657 | break; |
2658 | case PCI_DEVICE_ID_QLOGIC_ISP2312: |
2659 | ha->isp_type |= DT_ISP2312; |
2660 | ha->device_type |= DT_ZIO_SUPPORTED; |
2661 | ha->fw_srisc_address = RISC_START_ADDRESS_2300; |
2662 | break; |
2663 | case PCI_DEVICE_ID_QLOGIC_ISP2322: |
2664 | ha->isp_type |= DT_ISP2322; |
2665 | ha->device_type |= DT_ZIO_SUPPORTED; |
2666 | if (ha->pdev->subsystem_vendor == 0x1028 && |
2667 | ha->pdev->subsystem_device == 0x0170) |
2668 | ha->device_type |= DT_OEM_001; |
2669 | ha->fw_srisc_address = RISC_START_ADDRESS_2300; |
2670 | break; |
2671 | case PCI_DEVICE_ID_QLOGIC_ISP6312: |
2672 | ha->isp_type |= DT_ISP6312; |
2673 | ha->fw_srisc_address = RISC_START_ADDRESS_2300; |
2674 | break; |
2675 | case PCI_DEVICE_ID_QLOGIC_ISP6322: |
2676 | ha->isp_type |= DT_ISP6322; |
2677 | ha->fw_srisc_address = RISC_START_ADDRESS_2300; |
2678 | break; |
2679 | case PCI_DEVICE_ID_QLOGIC_ISP2422: |
2680 | ha->isp_type |= DT_ISP2422; |
2681 | ha->device_type |= DT_ZIO_SUPPORTED; |
2682 | ha->device_type |= DT_FWI2; |
2683 | ha->device_type |= DT_IIDMA; |
2684 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2685 | break; |
2686 | case PCI_DEVICE_ID_QLOGIC_ISP2432: |
2687 | ha->isp_type |= DT_ISP2432; |
2688 | ha->device_type |= DT_ZIO_SUPPORTED; |
2689 | ha->device_type |= DT_FWI2; |
2690 | ha->device_type |= DT_IIDMA; |
2691 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2692 | break; |
2693 | case PCI_DEVICE_ID_QLOGIC_ISP8432: |
2694 | ha->isp_type |= DT_ISP8432; |
2695 | ha->device_type |= DT_ZIO_SUPPORTED; |
2696 | ha->device_type |= DT_FWI2; |
2697 | ha->device_type |= DT_IIDMA; |
2698 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2699 | break; |
2700 | case PCI_DEVICE_ID_QLOGIC_ISP5422: |
2701 | ha->isp_type |= DT_ISP5422; |
2702 | ha->device_type |= DT_FWI2; |
2703 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2704 | break; |
2705 | case PCI_DEVICE_ID_QLOGIC_ISP5432: |
2706 | ha->isp_type |= DT_ISP5432; |
2707 | ha->device_type |= DT_FWI2; |
2708 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2709 | break; |
2710 | case PCI_DEVICE_ID_QLOGIC_ISP2532: |
2711 | ha->isp_type |= DT_ISP2532; |
2712 | ha->device_type |= DT_ZIO_SUPPORTED; |
2713 | ha->device_type |= DT_FWI2; |
2714 | ha->device_type |= DT_IIDMA; |
2715 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2716 | break; |
2717 | case PCI_DEVICE_ID_QLOGIC_ISP8001: |
2718 | ha->isp_type |= DT_ISP8001; |
2719 | ha->device_type |= DT_ZIO_SUPPORTED; |
2720 | ha->device_type |= DT_FWI2; |
2721 | ha->device_type |= DT_IIDMA; |
2722 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2723 | break; |
2724 | case PCI_DEVICE_ID_QLOGIC_ISP8021: |
2725 | ha->isp_type |= DT_ISP8021; |
2726 | ha->device_type |= DT_ZIO_SUPPORTED; |
2727 | ha->device_type |= DT_FWI2; |
2728 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2729 | /* Initialize 82XX ISP flags */ |
2730 | qla82xx_init_flags(ha); |
2731 | break; |
2732 | case PCI_DEVICE_ID_QLOGIC_ISP8044: |
2733 | ha->isp_type |= DT_ISP8044; |
2734 | ha->device_type |= DT_ZIO_SUPPORTED; |
2735 | ha->device_type |= DT_FWI2; |
2736 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2737 | /* Initialize 82XX ISP flags */ |
2738 | qla82xx_init_flags(ha); |
2739 | break; |
2740 | case PCI_DEVICE_ID_QLOGIC_ISP2031: |
2741 | ha->isp_type |= DT_ISP2031; |
2742 | ha->device_type |= DT_ZIO_SUPPORTED; |
2743 | ha->device_type |= DT_FWI2; |
2744 | ha->device_type |= DT_IIDMA; |
2745 | ha->device_type |= DT_T10_PI; |
2746 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2747 | break; |
2748 | case PCI_DEVICE_ID_QLOGIC_ISP8031: |
2749 | ha->isp_type |= DT_ISP8031; |
2750 | ha->device_type |= DT_ZIO_SUPPORTED; |
2751 | ha->device_type |= DT_FWI2; |
2752 | ha->device_type |= DT_IIDMA; |
2753 | ha->device_type |= DT_T10_PI; |
2754 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2755 | break; |
2756 | case PCI_DEVICE_ID_QLOGIC_ISPF001: |
2757 | ha->isp_type |= DT_ISPFX00; |
2758 | break; |
2759 | case PCI_DEVICE_ID_QLOGIC_ISP2071: |
2760 | ha->isp_type |= DT_ISP2071; |
2761 | ha->device_type |= DT_ZIO_SUPPORTED; |
2762 | ha->device_type |= DT_FWI2; |
2763 | ha->device_type |= DT_IIDMA; |
2764 | ha->device_type |= DT_T10_PI; |
2765 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2766 | break; |
2767 | case PCI_DEVICE_ID_QLOGIC_ISP2271: |
2768 | ha->isp_type |= DT_ISP2271; |
2769 | ha->device_type |= DT_ZIO_SUPPORTED; |
2770 | ha->device_type |= DT_FWI2; |
2771 | ha->device_type |= DT_IIDMA; |
2772 | ha->device_type |= DT_T10_PI; |
2773 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2774 | break; |
2775 | case PCI_DEVICE_ID_QLOGIC_ISP2261: |
2776 | ha->isp_type |= DT_ISP2261; |
2777 | ha->device_type |= DT_ZIO_SUPPORTED; |
2778 | ha->device_type |= DT_FWI2; |
2779 | ha->device_type |= DT_IIDMA; |
2780 | ha->device_type |= DT_T10_PI; |
2781 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2782 | break; |
2783 | case PCI_DEVICE_ID_QLOGIC_ISP2081: |
2784 | case PCI_DEVICE_ID_QLOGIC_ISP2089: |
2785 | ha->isp_type |= DT_ISP2081; |
2786 | ha->device_type |= DT_ZIO_SUPPORTED; |
2787 | ha->device_type |= DT_FWI2; |
2788 | ha->device_type |= DT_IIDMA; |
2789 | ha->device_type |= DT_T10_PI; |
2790 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2791 | break; |
2792 | case PCI_DEVICE_ID_QLOGIC_ISP2281: |
2793 | case PCI_DEVICE_ID_QLOGIC_ISP2289: |
2794 | ha->isp_type |= DT_ISP2281; |
2795 | ha->device_type |= DT_ZIO_SUPPORTED; |
2796 | ha->device_type |= DT_FWI2; |
2797 | ha->device_type |= DT_IIDMA; |
2798 | ha->device_type |= DT_T10_PI; |
2799 | ha->fw_srisc_address = RISC_START_ADDRESS_2400; |
2800 | break; |
2801 | } |
2802 | |
2803 | if (IS_QLA82XX(ha)) |
2804 | ha->port_no = ha->portnum & 1; |
2805 | else { |
2806 | /* Get adapter physical port no from interrupt pin register. */ |
2807 | pci_read_config_byte(dev: ha->pdev, PCI_INTERRUPT_PIN, val: &ha->port_no); |
2808 | if (IS_QLA25XX(ha) || IS_QLA2031(ha) || |
2809 | IS_QLA27XX(ha) || IS_QLA28XX(ha)) |
2810 | ha->port_no--; |
2811 | else |
2812 | ha->port_no = !(ha->port_no & 1); |
2813 | } |
2814 | |
2815 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x000b, |
2816 | fmt: "device_type=0x%x port=%d fw_srisc_address=0x%x.\n" , |
2817 | ha->device_type, ha->port_no, ha->fw_srisc_address); |
2818 | } |
2819 | |
2820 | static void |
2821 | qla2xxx_scan_start(struct Scsi_Host *shost) |
2822 | { |
2823 | scsi_qla_host_t *vha = shost_priv(shost); |
2824 | |
2825 | if (vha->hw->flags.running_gold_fw) |
2826 | return; |
2827 | |
2828 | set_bit(LOOP_RESYNC_NEEDED, addr: &vha->dpc_flags); |
2829 | set_bit(LOCAL_LOOP_UPDATE, addr: &vha->dpc_flags); |
2830 | set_bit(RSCN_UPDATE, addr: &vha->dpc_flags); |
2831 | set_bit(NPIV_CONFIG_NEEDED, addr: &vha->dpc_flags); |
2832 | } |
2833 | |
2834 | static int |
2835 | qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) |
2836 | { |
2837 | scsi_qla_host_t *vha = shost_priv(shost); |
2838 | |
2839 | if (test_bit(UNLOADING, &vha->dpc_flags)) |
2840 | return 1; |
2841 | if (!vha->host) |
2842 | return 1; |
2843 | if (time > vha->hw->loop_reset_delay * HZ) |
2844 | return 1; |
2845 | |
2846 | return atomic_read(v: &vha->loop_state) == LOOP_READY; |
2847 | } |
2848 | |
2849 | static void qla_heartbeat_work_fn(struct work_struct *work) |
2850 | { |
2851 | struct qla_hw_data *ha = container_of(work, |
2852 | struct qla_hw_data, heartbeat_work); |
2853 | struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev); |
2854 | |
2855 | if (!ha->flags.mbox_busy && base_vha->flags.init_done) |
2856 | qla_no_op_mb(vha: base_vha); |
2857 | } |
2858 | |
2859 | static void qla2x00_iocb_work_fn(struct work_struct *work) |
2860 | { |
2861 | struct scsi_qla_host *vha = container_of(work, |
2862 | struct scsi_qla_host, iocb_work); |
2863 | struct qla_hw_data *ha = vha->hw; |
2864 | struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev); |
2865 | int i = 2; |
2866 | unsigned long flags; |
2867 | |
2868 | if (test_bit(UNLOADING, &base_vha->dpc_flags)) |
2869 | return; |
2870 | |
2871 | while (!list_empty(head: &vha->work_list) && i > 0) { |
2872 | qla2x00_do_work(vha); |
2873 | i--; |
2874 | } |
2875 | |
2876 | spin_lock_irqsave(&vha->work_lock, flags); |
2877 | clear_bit(IOCB_WORK_ACTIVE, addr: &vha->dpc_flags); |
2878 | spin_unlock_irqrestore(lock: &vha->work_lock, flags); |
2879 | } |
2880 | |
2881 | static void |
2882 | qla_trace_init(void) |
2883 | { |
2884 | qla_trc_array = trace_array_get_by_name(name: "qla2xxx" ); |
2885 | if (!qla_trc_array) { |
2886 | ql_log(ql_log_fatal, NULL, 0x0001, |
2887 | fmt: "Unable to create qla2xxx trace instance, instance logging will be disabled.\n" ); |
2888 | return; |
2889 | } |
2890 | |
2891 | QLA_TRACE_ENABLE(qla_trc_array); |
2892 | } |
2893 | |
2894 | static void |
2895 | qla_trace_uninit(void) |
2896 | { |
2897 | if (!qla_trc_array) |
2898 | return; |
2899 | trace_array_put(tr: qla_trc_array); |
2900 | } |
2901 | |
2902 | /* |
2903 | * PCI driver interface |
2904 | */ |
2905 | static int |
2906 | qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
2907 | { |
2908 | int ret = -ENODEV; |
2909 | struct Scsi_Host *host; |
2910 | scsi_qla_host_t *base_vha = NULL; |
2911 | struct qla_hw_data *ha; |
2912 | char pci_info[30]; |
2913 | char fw_str[30], wq_name[30]; |
2914 | struct scsi_host_template *sht; |
2915 | int bars, mem_only = 0; |
2916 | uint16_t req_length = 0, rsp_length = 0; |
2917 | struct req_que *req = NULL; |
2918 | struct rsp_que *rsp = NULL; |
2919 | int i; |
2920 | |
2921 | bars = pci_select_bars(dev: pdev, IORESOURCE_MEM | IORESOURCE_IO); |
2922 | sht = &qla2xxx_driver_template; |
2923 | if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || |
2924 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || |
2925 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || |
2926 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || |
2927 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || |
2928 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || |
2929 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || |
2930 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || |
2931 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || |
2932 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || |
2933 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || |
2934 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || |
2935 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || |
2936 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || |
2937 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || |
2938 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || |
2939 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || |
2940 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || |
2941 | pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { |
2942 | bars = pci_select_bars(dev: pdev, IORESOURCE_MEM); |
2943 | mem_only = 1; |
2944 | ql_dbg_pci(ql_dbg_init, pdev, 0x0007, |
2945 | fmt: "Mem only adapter.\n" ); |
2946 | } |
2947 | ql_dbg_pci(ql_dbg_init, pdev, 0x0008, |
2948 | fmt: "Bars=%d.\n" , bars); |
2949 | |
2950 | if (mem_only) { |
2951 | if (pci_enable_device_mem(dev: pdev)) |
2952 | return ret; |
2953 | } else { |
2954 | if (pci_enable_device(dev: pdev)) |
2955 | return ret; |
2956 | } |
2957 | |
2958 | if (is_kdump_kernel()) { |
2959 | ql2xmqsupport = 0; |
2960 | ql2xallocfwdump = 0; |
2961 | } |
2962 | |
2963 | ha = kzalloc(size: sizeof(struct qla_hw_data), GFP_KERNEL); |
2964 | if (!ha) { |
2965 | ql_log_pci(ql_log_fatal, pdev, 0x0009, |
2966 | fmt: "Unable to allocate memory for ha.\n" ); |
2967 | goto disable_device; |
2968 | } |
2969 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, |
2970 | fmt: "Memory allocated for ha=%p.\n" , ha); |
2971 | ha->pdev = pdev; |
2972 | INIT_LIST_HEAD(list: &ha->tgt.q_full_list); |
2973 | spin_lock_init(&ha->tgt.q_full_lock); |
2974 | spin_lock_init(&ha->tgt.sess_lock); |
2975 | spin_lock_init(&ha->tgt.atio_lock); |
2976 | |
2977 | spin_lock_init(&ha->sadb_lock); |
2978 | INIT_LIST_HEAD(list: &ha->sadb_tx_index_list); |
2979 | INIT_LIST_HEAD(list: &ha->sadb_rx_index_list); |
2980 | |
2981 | spin_lock_init(&ha->sadb_fp_lock); |
2982 | |
2983 | if (qla_edif_sadb_build_free_pool(ha)) { |
2984 | kfree(objp: ha); |
2985 | goto disable_device; |
2986 | } |
2987 | |
2988 | atomic_set(v: &ha->nvme_active_aen_cnt, i: 0); |
2989 | |
2990 | /* Clear our data area */ |
2991 | ha->bars = bars; |
2992 | ha->mem_only = mem_only; |
2993 | spin_lock_init(&ha->hardware_lock); |
2994 | spin_lock_init(&ha->vport_slock); |
2995 | mutex_init(&ha->selflogin_lock); |
2996 | mutex_init(&ha->optrom_mutex); |
2997 | |
2998 | /* Set ISP-type information. */ |
2999 | qla2x00_set_isp_flags(ha); |
3000 | |
3001 | /* Set EEH reset type to fundamental if required by hba */ |
3002 | if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || |
3003 | IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) |
3004 | pdev->needs_freset = 1; |
3005 | |
3006 | ha->prev_topology = 0; |
3007 | ha->init_cb_size = sizeof(init_cb_t); |
3008 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
3009 | ha->optrom_size = OPTROM_SIZE_2300; |
3010 | ha->max_exchg = FW_MAX_EXCHANGES_CNT; |
3011 | atomic_set(v: &ha->num_pend_mbx_stage1, i: 0); |
3012 | atomic_set(v: &ha->num_pend_mbx_stage2, i: 0); |
3013 | atomic_set(v: &ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); |
3014 | ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; |
3015 | INIT_LIST_HEAD(list: &ha->tmf_pending); |
3016 | INIT_LIST_HEAD(list: &ha->tmf_active); |
3017 | |
3018 | /* Assign ISP specific operations. */ |
3019 | if (IS_QLA2100(ha)) { |
3020 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; |
3021 | ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; |
3022 | req_length = REQUEST_ENTRY_CNT_2100; |
3023 | rsp_length = RESPONSE_ENTRY_CNT_2100; |
3024 | ha->max_loop_id = SNS_LAST_LOOP_ID_2100; |
3025 | ha->gid_list_info_size = 4; |
3026 | ha->flash_conf_off = ~0; |
3027 | ha->flash_data_off = ~0; |
3028 | ha->nvram_conf_off = ~0; |
3029 | ha->nvram_data_off = ~0; |
3030 | ha->isp_ops = &qla2100_isp_ops; |
3031 | } else if (IS_QLA2200(ha)) { |
3032 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; |
3033 | ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; |
3034 | req_length = REQUEST_ENTRY_CNT_2200; |
3035 | rsp_length = RESPONSE_ENTRY_CNT_2100; |
3036 | ha->max_loop_id = SNS_LAST_LOOP_ID_2100; |
3037 | ha->gid_list_info_size = 4; |
3038 | ha->flash_conf_off = ~0; |
3039 | ha->flash_data_off = ~0; |
3040 | ha->nvram_conf_off = ~0; |
3041 | ha->nvram_data_off = ~0; |
3042 | ha->isp_ops = &qla2100_isp_ops; |
3043 | } else if (IS_QLA23XX(ha)) { |
3044 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; |
3045 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3046 | req_length = REQUEST_ENTRY_CNT_2200; |
3047 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
3048 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3049 | ha->gid_list_info_size = 6; |
3050 | if (IS_QLA2322(ha) || IS_QLA6322(ha)) |
3051 | ha->optrom_size = OPTROM_SIZE_2322; |
3052 | ha->flash_conf_off = ~0; |
3053 | ha->flash_data_off = ~0; |
3054 | ha->nvram_conf_off = ~0; |
3055 | ha->nvram_data_off = ~0; |
3056 | ha->isp_ops = &qla2300_isp_ops; |
3057 | } else if (IS_QLA24XX_TYPE(ha)) { |
3058 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3059 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3060 | req_length = REQUEST_ENTRY_CNT_24XX; |
3061 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
3062 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3063 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3064 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
3065 | ha->gid_list_info_size = 8; |
3066 | ha->optrom_size = OPTROM_SIZE_24XX; |
3067 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; |
3068 | ha->isp_ops = &qla24xx_isp_ops; |
3069 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; |
3070 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; |
3071 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
3072 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
3073 | } else if (IS_QLA25XX(ha)) { |
3074 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3075 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3076 | req_length = REQUEST_ENTRY_CNT_24XX; |
3077 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
3078 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3079 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3080 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
3081 | ha->gid_list_info_size = 8; |
3082 | ha->optrom_size = OPTROM_SIZE_25XX; |
3083 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3084 | ha->isp_ops = &qla25xx_isp_ops; |
3085 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; |
3086 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; |
3087 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
3088 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
3089 | } else if (IS_QLA81XX(ha)) { |
3090 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3091 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3092 | req_length = REQUEST_ENTRY_CNT_24XX; |
3093 | rsp_length = RESPONSE_ENTRY_CNT_2300; |
3094 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3095 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3096 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3097 | ha->gid_list_info_size = 8; |
3098 | ha->optrom_size = OPTROM_SIZE_81XX; |
3099 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3100 | ha->isp_ops = &qla81xx_isp_ops; |
3101 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; |
3102 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; |
3103 | ha->nvram_conf_off = ~0; |
3104 | ha->nvram_data_off = ~0; |
3105 | } else if (IS_QLA82XX(ha)) { |
3106 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3107 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3108 | req_length = REQUEST_ENTRY_CNT_82XX; |
3109 | rsp_length = RESPONSE_ENTRY_CNT_82XX; |
3110 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3111 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3112 | ha->gid_list_info_size = 8; |
3113 | ha->optrom_size = OPTROM_SIZE_82XX; |
3114 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3115 | ha->isp_ops = &qla82xx_isp_ops; |
3116 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; |
3117 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; |
3118 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
3119 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
3120 | } else if (IS_QLA8044(ha)) { |
3121 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3122 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3123 | req_length = REQUEST_ENTRY_CNT_82XX; |
3124 | rsp_length = RESPONSE_ENTRY_CNT_82XX; |
3125 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3126 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3127 | ha->gid_list_info_size = 8; |
3128 | ha->optrom_size = OPTROM_SIZE_83XX; |
3129 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3130 | ha->isp_ops = &qla8044_isp_ops; |
3131 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; |
3132 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA; |
3133 | ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; |
3134 | ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; |
3135 | } else if (IS_QLA83XX(ha)) { |
3136 | ha->portnum = PCI_FUNC(ha->pdev->devfn); |
3137 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3138 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3139 | req_length = REQUEST_ENTRY_CNT_83XX; |
3140 | rsp_length = RESPONSE_ENTRY_CNT_83XX; |
3141 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3142 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3143 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3144 | ha->gid_list_info_size = 8; |
3145 | ha->optrom_size = OPTROM_SIZE_83XX; |
3146 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3147 | ha->isp_ops = &qla83xx_isp_ops; |
3148 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; |
3149 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; |
3150 | ha->nvram_conf_off = ~0; |
3151 | ha->nvram_data_off = ~0; |
3152 | } else if (IS_QLAFX00(ha)) { |
3153 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; |
3154 | ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; |
3155 | ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; |
3156 | req_length = REQUEST_ENTRY_CNT_FX00; |
3157 | rsp_length = RESPONSE_ENTRY_CNT_FX00; |
3158 | ha->isp_ops = &qlafx00_isp_ops; |
3159 | ha->port_down_retry_count = 30; /* default value */ |
3160 | ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; |
3161 | ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; |
3162 | ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; |
3163 | ha->mr.fw_hbt_en = 1; |
3164 | ha->mr.host_info_resend = false; |
3165 | ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; |
3166 | } else if (IS_QLA27XX(ha)) { |
3167 | ha->portnum = PCI_FUNC(ha->pdev->devfn); |
3168 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3169 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3170 | req_length = REQUEST_ENTRY_CNT_83XX; |
3171 | rsp_length = RESPONSE_ENTRY_CNT_83XX; |
3172 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3173 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3174 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3175 | ha->gid_list_info_size = 8; |
3176 | ha->optrom_size = OPTROM_SIZE_83XX; |
3177 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3178 | ha->isp_ops = &qla27xx_isp_ops; |
3179 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; |
3180 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; |
3181 | ha->nvram_conf_off = ~0; |
3182 | ha->nvram_data_off = ~0; |
3183 | } else if (IS_QLA28XX(ha)) { |
3184 | ha->portnum = PCI_FUNC(ha->pdev->devfn); |
3185 | ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; |
3186 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
3187 | req_length = REQUEST_ENTRY_CNT_83XX; |
3188 | rsp_length = RESPONSE_ENTRY_CNT_83XX; |
3189 | ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; |
3190 | ha->max_loop_id = SNS_LAST_LOOP_ID_2300; |
3191 | ha->init_cb_size = sizeof(struct mid_init_cb_81xx); |
3192 | ha->gid_list_info_size = 8; |
3193 | ha->optrom_size = OPTROM_SIZE_28XX; |
3194 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; |
3195 | ha->isp_ops = &qla27xx_isp_ops; |
3196 | ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; |
3197 | ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; |
3198 | ha->nvram_conf_off = ~0; |
3199 | ha->nvram_data_off = ~0; |
3200 | } |
3201 | |
3202 | ql_dbg_pci(ql_dbg_init, pdev, 0x001e, |
3203 | fmt: "mbx_count=%d, req_length=%d, " |
3204 | "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " |
3205 | "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " |
3206 | "max_fibre_devices=%d.\n" , |
3207 | ha->mbx_count, req_length, rsp_length, ha->max_loop_id, |
3208 | ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, |
3209 | ha->nvram_npiv_size, ha->max_fibre_devices); |
3210 | ql_dbg_pci(ql_dbg_init, pdev, 0x001f, |
3211 | fmt: "isp_ops=%p, flash_conf_off=%d, " |
3212 | "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n" , |
3213 | ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, |
3214 | ha->nvram_conf_off, ha->nvram_data_off); |
3215 | |
3216 | /* Configure PCI I/O space */ |
3217 | ret = ha->isp_ops->iospace_config(ha); |
3218 | if (ret) |
3219 | goto iospace_config_failed; |
3220 | |
3221 | ql_log_pci(ql_log_info, pdev, 0x001d, |
3222 | fmt: "Found an ISP%04X irq %d iobase 0x%p.\n" , |
3223 | pdev->device, pdev->irq, ha->iobase); |
3224 | mutex_init(&ha->vport_lock); |
3225 | mutex_init(&ha->mq_lock); |
3226 | init_completion(x: &ha->mbx_cmd_comp); |
3227 | complete(&ha->mbx_cmd_comp); |
3228 | init_completion(x: &ha->mbx_intr_comp); |
3229 | init_completion(x: &ha->dcbx_comp); |
3230 | init_completion(x: &ha->lb_portup_comp); |
3231 | |
3232 | set_bit(nr: 0, addr: (unsigned long *) ha->vp_idx_map); |
3233 | |
3234 | qla2x00_config_dma_addressing(ha); |
3235 | ql_dbg_pci(ql_dbg_init, pdev, 0x0020, |
3236 | fmt: "64 Bit addressing is %s.\n" , |
3237 | ha->flags.enable_64bit_addressing ? "enable" : |
3238 | "disable" ); |
3239 | ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); |
3240 | if (ret) { |
3241 | ql_log_pci(ql_log_fatal, pdev, 0x0031, |
3242 | fmt: "Failed to allocate memory for adapter, aborting.\n" ); |
3243 | |
3244 | goto probe_hw_failed; |
3245 | } |
3246 | |
3247 | req->max_q_depth = MAX_Q_DEPTH; |
3248 | if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) |
3249 | req->max_q_depth = ql2xmaxqdepth; |
3250 | |
3251 | |
3252 | base_vha = qla2x00_create_host(sht, ha); |
3253 | if (!base_vha) { |
3254 | ret = -ENOMEM; |
3255 | goto probe_hw_failed; |
3256 | } |
3257 | |
3258 | pci_set_drvdata(pdev, data: base_vha); |
3259 | set_bit(PFLG_DRIVER_PROBING, addr: &base_vha->pci_flags); |
3260 | |
3261 | host = base_vha->host; |
3262 | base_vha->req = req; |
3263 | if (IS_QLA2XXX_MIDTYPE(ha)) |
3264 | base_vha->mgmt_svr_loop_id = |
3265 | qla2x00_reserve_mgmt_server_loop_id(base_vha); |
3266 | else |
3267 | base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + |
3268 | base_vha->vp_idx; |
3269 | |
3270 | /* Setup fcport template structure. */ |
3271 | ha->mr.fcport.vha = base_vha; |
3272 | ha->mr.fcport.port_type = FCT_UNKNOWN; |
3273 | ha->mr.fcport.loop_id = FC_NO_LOOP_ID; |
3274 | qla2x00_set_fcport_state(fcport: &ha->mr.fcport, state: FCS_UNCONFIGURED); |
3275 | ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; |
3276 | ha->mr.fcport.scan_state = 1; |
3277 | |
3278 | qla2xxx_reset_stats(shost: host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN | |
3279 | QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT | |
3280 | QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN); |
3281 | |
3282 | /* Set the SG table size based on ISP type */ |
3283 | if (!IS_FWI2_CAPABLE(ha)) { |
3284 | if (IS_QLA2100(ha)) |
3285 | host->sg_tablesize = 32; |
3286 | } else { |
3287 | if (!IS_QLA82XX(ha)) |
3288 | host->sg_tablesize = QLA_SG_ALL; |
3289 | } |
3290 | host->max_id = ha->max_fibre_devices; |
3291 | host->cmd_per_lun = 3; |
3292 | host->unique_id = host->host_no; |
3293 | |
3294 | if (ql2xenabledif && ql2xenabledif != 2) { |
3295 | ql_log(ql_log_warn, vha: base_vha, 0x302d, |
3296 | fmt: "Invalid value for ql2xenabledif, resetting it to default (2)\n" ); |
3297 | ql2xenabledif = 2; |
3298 | } |
3299 | |
3300 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) |
3301 | host->max_cmd_len = 32; |
3302 | else |
3303 | host->max_cmd_len = MAX_CMDSZ; |
3304 | host->max_channel = MAX_BUSES - 1; |
3305 | /* Older HBAs support only 16-bit LUNs */ |
3306 | if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && |
3307 | ql2xmaxlun > 0xffff) |
3308 | host->max_lun = 0xffff; |
3309 | else |
3310 | host->max_lun = ql2xmaxlun; |
3311 | host->transportt = qla2xxx_transport_template; |
3312 | sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); |
3313 | |
3314 | ql_dbg(ql_dbg_init, vha: base_vha, 0x0033, |
3315 | fmt: "max_id=%d this_id=%d " |
3316 | "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " |
3317 | "max_lun=%llu transportt=%p, vendor_id=%llu.\n" , host->max_id, |
3318 | host->this_id, host->cmd_per_lun, host->unique_id, |
3319 | host->max_cmd_len, host->max_channel, host->max_lun, |
3320 | host->transportt, sht->vendor_id); |
3321 | |
3322 | INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); |
3323 | |
3324 | /* Set up the irqs */ |
3325 | ret = qla2x00_request_irqs(ha, rsp); |
3326 | if (ret) |
3327 | goto probe_failed; |
3328 | |
3329 | /* Alloc arrays of request and response ring ptrs */ |
3330 | ret = qla2x00_alloc_queues(ha, req, rsp); |
3331 | if (ret) { |
3332 | ql_log(ql_log_fatal, vha: base_vha, 0x003d, |
3333 | fmt: "Failed to allocate memory for queue pointers..." |
3334 | "aborting.\n" ); |
3335 | ret = -ENODEV; |
3336 | goto probe_failed; |
3337 | } |
3338 | |
3339 | if (ha->mqenable) { |
3340 | /* number of hardware queues supported by blk/scsi-mq*/ |
3341 | host->nr_hw_queues = ha->max_qpairs; |
3342 | |
3343 | ql_dbg(ql_dbg_init, vha: base_vha, 0x0192, |
3344 | fmt: "blk/scsi-mq enabled, HW queues = %d.\n" , host->nr_hw_queues); |
3345 | } else { |
3346 | if (ql2xnvmeenable) { |
3347 | host->nr_hw_queues = ha->max_qpairs; |
3348 | ql_dbg(ql_dbg_init, vha: base_vha, 0x0194, |
3349 | fmt: "FC-NVMe support is enabled, HW queues=%d\n" , |
3350 | host->nr_hw_queues); |
3351 | } else { |
3352 | ql_dbg(ql_dbg_init, vha: base_vha, 0x0193, |
3353 | fmt: "blk/scsi-mq disabled.\n" ); |
3354 | } |
3355 | } |
3356 | |
3357 | qlt_probe_one_stage1(base_vha, ha); |
3358 | |
3359 | pci_save_state(dev: pdev); |
3360 | |
3361 | /* Assign back pointers */ |
3362 | rsp->req = req; |
3363 | req->rsp = rsp; |
3364 | |
3365 | if (IS_QLAFX00(ha)) { |
3366 | ha->rsp_q_map[0] = rsp; |
3367 | ha->req_q_map[0] = req; |
3368 | set_bit(nr: 0, addr: ha->req_qid_map); |
3369 | set_bit(nr: 0, addr: ha->rsp_qid_map); |
3370 | } |
3371 | |
3372 | /* FWI2-capable only. */ |
3373 | req->req_q_in = &ha->iobase->isp24.req_q_in; |
3374 | req->req_q_out = &ha->iobase->isp24.req_q_out; |
3375 | rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; |
3376 | rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; |
3377 | if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || |
3378 | IS_QLA28XX(ha)) { |
3379 | req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; |
3380 | req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; |
3381 | rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; |
3382 | rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; |
3383 | } |
3384 | |
3385 | if (IS_QLAFX00(ha)) { |
3386 | req->req_q_in = &ha->iobase->ispfx00.req_q_in; |
3387 | req->req_q_out = &ha->iobase->ispfx00.req_q_out; |
3388 | rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; |
3389 | rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; |
3390 | } |
3391 | |
3392 | if (IS_P3P_TYPE(ha)) { |
3393 | req->req_q_out = &ha->iobase->isp82.req_q_out[0]; |
3394 | rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; |
3395 | rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; |
3396 | } |
3397 | |
3398 | ql_dbg(ql_dbg_multiq, vha: base_vha, 0xc009, |
3399 | fmt: "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n" , |
3400 | ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); |
3401 | ql_dbg(ql_dbg_multiq, vha: base_vha, 0xc00a, |
3402 | fmt: "req->req_q_in=%p req->req_q_out=%p " |
3403 | "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n" , |
3404 | req->req_q_in, req->req_q_out, |
3405 | rsp->rsp_q_in, rsp->rsp_q_out); |
3406 | ql_dbg(ql_dbg_init, vha: base_vha, 0x003e, |
3407 | fmt: "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n" , |
3408 | ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); |
3409 | ql_dbg(ql_dbg_init, vha: base_vha, 0x003f, |
3410 | fmt: "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n" , |
3411 | req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); |
3412 | |
3413 | ha->wq = alloc_workqueue(fmt: "qla2xxx_wq" , flags: WQ_MEM_RECLAIM, max_active: 0); |
3414 | if (unlikely(!ha->wq)) { |
3415 | ret = -ENOMEM; |
3416 | goto probe_failed; |
3417 | } |
3418 | |
3419 | if (ha->isp_ops->initialize_adapter(base_vha)) { |
3420 | ql_log(ql_log_fatal, vha: base_vha, 0x00d6, |
3421 | fmt: "Failed to initialize adapter - Adapter flags %x.\n" , |
3422 | base_vha->device_flags); |
3423 | |
3424 | if (IS_QLA82XX(ha)) { |
3425 | qla82xx_idc_lock(ha); |
3426 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
3427 | QLA8XXX_DEV_FAILED); |
3428 | qla82xx_idc_unlock(ha); |
3429 | ql_log(ql_log_fatal, vha: base_vha, 0x00d7, |
3430 | fmt: "HW State: FAILED.\n" ); |
3431 | } else if (IS_QLA8044(ha)) { |
3432 | qla8044_idc_lock(ha); |
3433 | qla8044_wr_direct(vha: base_vha, |
3434 | crb_reg: QLA8044_CRB_DEV_STATE_INDEX, |
3435 | value: QLA8XXX_DEV_FAILED); |
3436 | qla8044_idc_unlock(ha); |
3437 | ql_log(ql_log_fatal, vha: base_vha, 0x0150, |
3438 | fmt: "HW State: FAILED.\n" ); |
3439 | } |
3440 | |
3441 | ret = -ENODEV; |
3442 | goto probe_failed; |
3443 | } |
3444 | |
3445 | if (IS_QLAFX00(ha)) |
3446 | host->can_queue = QLAFX00_MAX_CANQUEUE; |
3447 | else |
3448 | host->can_queue = req->num_outstanding_cmds - 10; |
3449 | |
3450 | ql_dbg(ql_dbg_init, vha: base_vha, 0x0032, |
3451 | fmt: "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n" , |
3452 | host->can_queue, base_vha->req, |
3453 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); |
3454 | |
3455 | /* Check if FW supports MQ or not for ISP25xx */ |
3456 | if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) |
3457 | ha->mqenable = 0; |
3458 | |
3459 | if (ha->mqenable) { |
3460 | bool startit = false; |
3461 | |
3462 | if (QLA_TGT_MODE_ENABLED()) |
3463 | startit = false; |
3464 | |
3465 | if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) |
3466 | startit = true; |
3467 | |
3468 | /* Create start of day qpairs for Block MQ */ |
3469 | for (i = 0; i < ha->max_qpairs; i++) |
3470 | qla2xxx_create_qpair(base_vha, 5, 0, startit); |
3471 | } |
3472 | qla_init_iocb_limit(base_vha); |
3473 | |
3474 | if (ha->flags.running_gold_fw) |
3475 | goto skip_dpc; |
3476 | |
3477 | /* |
3478 | * Startup the kernel thread for this host adapter |
3479 | */ |
3480 | ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, |
3481 | "%s_dpc" , base_vha->host_str); |
3482 | if (IS_ERR(ptr: ha->dpc_thread)) { |
3483 | ql_log(ql_log_fatal, vha: base_vha, 0x00ed, |
3484 | fmt: "Failed to start DPC thread.\n" ); |
3485 | ret = PTR_ERR(ptr: ha->dpc_thread); |
3486 | ha->dpc_thread = NULL; |
3487 | goto probe_failed; |
3488 | } |
3489 | ql_dbg(ql_dbg_init, vha: base_vha, 0x00ee, |
3490 | fmt: "DPC thread started successfully.\n" ); |
3491 | |
3492 | /* |
3493 | * If we're not coming up in initiator mode, we might sit for |
3494 | * a while without waking up the dpc thread, which leads to a |
3495 | * stuck process warning. So just kick the dpc once here and |
3496 | * let the kthread start (and go back to sleep in qla2x00_do_dpc). |
3497 | */ |
3498 | qla2xxx_wake_dpc(base_vha); |
3499 | |
3500 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); |
3501 | |
3502 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { |
3503 | sprintf(buf: wq_name, fmt: "qla2xxx_%lu_dpc_lp_wq" , base_vha->host_no); |
3504 | ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); |
3505 | INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); |
3506 | |
3507 | sprintf(buf: wq_name, fmt: "qla2xxx_%lu_dpc_hp_wq" , base_vha->host_no); |
3508 | ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); |
3509 | INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); |
3510 | INIT_WORK(&ha->idc_state_handler, |
3511 | qla83xx_idc_state_handler_work); |
3512 | INIT_WORK(&ha->nic_core_unrecoverable, |
3513 | qla83xx_nic_core_unrecoverable_work); |
3514 | } |
3515 | |
3516 | skip_dpc: |
3517 | list_add_tail(new: &base_vha->list, head: &ha->vp_list); |
3518 | base_vha->host->irq = ha->pdev->irq; |
3519 | |
3520 | /* Initialized the timer */ |
3521 | qla2x00_start_timer(vha: base_vha, WATCH_INTERVAL); |
3522 | ql_dbg(ql_dbg_init, vha: base_vha, 0x00ef, |
3523 | fmt: "Started qla2x00_timer with " |
3524 | "interval=%d.\n" , WATCH_INTERVAL); |
3525 | ql_dbg(ql_dbg_init, vha: base_vha, 0x00f0, |
3526 | fmt: "Detected hba at address=%p.\n" , |
3527 | ha); |
3528 | |
3529 | if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { |
3530 | if (ha->fw_attributes & BIT_4) { |
3531 | int prot = 0, guard; |
3532 | |
3533 | base_vha->flags.difdix_supported = 1; |
3534 | ql_dbg(ql_dbg_init, vha: base_vha, 0x00f1, |
3535 | fmt: "Registering for DIF/DIX type 1 and 3 protection.\n" ); |
3536 | if (ql2xprotmask) |
3537 | scsi_host_set_prot(shost: host, mask: ql2xprotmask); |
3538 | else |
3539 | scsi_host_set_prot(shost: host, |
3540 | mask: prot | SHOST_DIF_TYPE1_PROTECTION |
3541 | | SHOST_DIF_TYPE2_PROTECTION |
3542 | | SHOST_DIF_TYPE3_PROTECTION |
3543 | | SHOST_DIX_TYPE1_PROTECTION |
3544 | | SHOST_DIX_TYPE2_PROTECTION |
3545 | | SHOST_DIX_TYPE3_PROTECTION); |
3546 | |
3547 | guard = SHOST_DIX_GUARD_CRC; |
3548 | |
3549 | if (IS_PI_IPGUARD_CAPABLE(ha) && |
3550 | (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) |
3551 | guard |= SHOST_DIX_GUARD_IP; |
3552 | |
3553 | if (ql2xprotguard) |
3554 | scsi_host_set_guard(shost: host, type: ql2xprotguard); |
3555 | else |
3556 | scsi_host_set_guard(shost: host, type: guard); |
3557 | } else |
3558 | base_vha->flags.difdix_supported = 0; |
3559 | } |
3560 | |
3561 | ha->isp_ops->enable_intrs(ha); |
3562 | |
3563 | if (IS_QLAFX00(ha)) { |
3564 | ret = qlafx00_fx_disc(base_vha, |
3565 | &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); |
3566 | host->sg_tablesize = (ha->mr.extended_io_enabled) ? |
3567 | QLA_SG_ALL : 128; |
3568 | } |
3569 | |
3570 | ret = scsi_add_host(host, dev: &pdev->dev); |
3571 | if (ret) |
3572 | goto probe_failed; |
3573 | |
3574 | base_vha->flags.init_done = 1; |
3575 | base_vha->flags.online = 1; |
3576 | ha->prev_minidump_failed = 0; |
3577 | |
3578 | ql_dbg(ql_dbg_init, vha: base_vha, 0x00f2, |
3579 | fmt: "Init done and hba is online.\n" ); |
3580 | |
3581 | if (qla_ini_mode_enabled(ha: base_vha) || |
3582 | qla_dual_mode_enabled(ha: base_vha)) |
3583 | scsi_scan_host(host); |
3584 | else |
3585 | ql_log(ql_log_info, vha: base_vha, 0x0122, |
3586 | fmt: "skipping scsi_scan_host() for non-initiator port\n" ); |
3587 | |
3588 | qla2x00_alloc_sysfs_attr(base_vha); |
3589 | |
3590 | if (IS_QLAFX00(ha)) { |
3591 | ret = qlafx00_fx_disc(base_vha, |
3592 | &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); |
3593 | |
3594 | /* Register system information */ |
3595 | ret = qlafx00_fx_disc(base_vha, |
3596 | &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); |
3597 | } |
3598 | |
3599 | qla2x00_init_host_attr(base_vha); |
3600 | |
3601 | qla2x00_dfs_setup(base_vha); |
3602 | |
3603 | ql_log(ql_log_info, vha: base_vha, 0x00fb, |
3604 | fmt: "QLogic %s - %s.\n" , ha->model_number, ha->model_desc); |
3605 | ql_log(ql_log_info, vha: base_vha, 0x00fc, |
3606 | fmt: "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n" , |
3607 | pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, |
3608 | sizeof(pci_info)), |
3609 | pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', |
3610 | base_vha->host_no, |
3611 | ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); |
3612 | |
3613 | qlt_add_target(ha, base_vha); |
3614 | |
3615 | clear_bit(PFLG_DRIVER_PROBING, addr: &base_vha->pci_flags); |
3616 | |
3617 | if (test_bit(UNLOADING, &base_vha->dpc_flags)) |
3618 | return -ENODEV; |
3619 | |
3620 | return 0; |
3621 | |
3622 | probe_failed: |
3623 | qla_enode_stop(vha: base_vha); |
3624 | qla_edb_stop(vha: base_vha); |
3625 | vfree(addr: base_vha->scan.l); |
3626 | if (base_vha->gnl.l) { |
3627 | dma_free_coherent(dev: &ha->pdev->dev, size: base_vha->gnl.size, |
3628 | cpu_addr: base_vha->gnl.l, dma_handle: base_vha->gnl.ldma); |
3629 | base_vha->gnl.l = NULL; |
3630 | } |
3631 | |
3632 | if (base_vha->timer_active) |
3633 | qla2x00_stop_timer(vha: base_vha); |
3634 | base_vha->flags.online = 0; |
3635 | if (ha->dpc_thread) { |
3636 | struct task_struct *t = ha->dpc_thread; |
3637 | |
3638 | ha->dpc_thread = NULL; |
3639 | kthread_stop(k: t); |
3640 | } |
3641 | |
3642 | qla2x00_free_device(base_vha); |
3643 | scsi_host_put(t: base_vha->host); |
3644 | /* |
3645 | * Need to NULL out local req/rsp after |
3646 | * qla2x00_free_device => qla2x00_free_queues frees |
3647 | * what these are pointing to. Or else we'll |
3648 | * fall over below in qla2x00_free_req/rsp_que. |
3649 | */ |
3650 | req = NULL; |
3651 | rsp = NULL; |
3652 | |
3653 | probe_hw_failed: |
3654 | qla2x00_mem_free(ha); |
3655 | qla2x00_free_req_que(ha, req); |
3656 | qla2x00_free_rsp_que(ha, rsp); |
3657 | qla2x00_clear_drv_active(ha); |
3658 | |
3659 | iospace_config_failed: |
3660 | if (IS_P3P_TYPE(ha)) { |
3661 | if (!ha->nx_pcibase) |
3662 | iounmap(addr: (device_reg_t *)ha->nx_pcibase); |
3663 | if (!ql2xdbwr) |
3664 | iounmap(addr: (device_reg_t *)ha->nxdb_wr_ptr); |
3665 | } else { |
3666 | if (ha->iobase) |
3667 | iounmap(addr: ha->iobase); |
3668 | if (ha->cregbase) |
3669 | iounmap(addr: ha->cregbase); |
3670 | } |
3671 | pci_release_selected_regions(ha->pdev, ha->bars); |
3672 | kfree(objp: ha); |
3673 | |
3674 | disable_device: |
3675 | pci_disable_device(dev: pdev); |
3676 | return ret; |
3677 | } |
3678 | |
3679 | static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) |
3680 | { |
3681 | scsi_qla_host_t *vp; |
3682 | unsigned long flags; |
3683 | struct qla_hw_data *ha; |
3684 | |
3685 | if (!base_vha) |
3686 | return; |
3687 | |
3688 | ha = base_vha->hw; |
3689 | |
3690 | spin_lock_irqsave(&ha->vport_slock, flags); |
3691 | list_for_each_entry(vp, &ha->vp_list, list) |
3692 | set_bit(PFLG_DRIVER_REMOVING, addr: &vp->pci_flags); |
3693 | |
3694 | /* |
3695 | * Indicate device removal to prevent future board_disable |
3696 | * and wait until any pending board_disable has completed. |
3697 | */ |
3698 | set_bit(PFLG_DRIVER_REMOVING, addr: &base_vha->pci_flags); |
3699 | spin_unlock_irqrestore(lock: &ha->vport_slock, flags); |
3700 | } |
3701 | |
/*
 * PCI ->shutdown() callback: quiesce the HBA so the system can power
 * off or kexec safely.  Stops tracing, firmware, the watchdog timer and
 * interrupts, then disables the PCI device.  Memory is deliberately not
 * torn down here (beyond the firmware dump) since the system is going
 * away; full cleanup is qla2x00_remove_one()'s job.
 */
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0xfffa,
	    fmt: "Adapter shutdown\n" );

	/*
	 * Prevent future board_disable and wait
	 * until any pending board_disable has completed.
	 */
	__qla_set_remove_flag(base_vha: vha);
	cancel_work_sync(work: &ha->board_disable);

	/* Device already disabled (e.g. by a prior PCI error); nothing to do. */
	if (!atomic_read(v: &pdev->enable_cnt))
		return;

	/* Notify ISPFX00 firmware */
	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(vha, 20);

	/* Turn-off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn-off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Newer ISPs get a full cleanup; older ones just stop firmware. */
	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		if (ha->flags.fw_started)
			qla2x00_abort_isp_cleanup(vha);
	} else {
		/* Stop currently executing firmware. */
		qla2x00_try_to_stop_firmware(vha);
	}

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	/* Release IRQ vectors and the firmware dump buffer. */
	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);

	pci_disable_device(dev: pdev);
	ql_log(ql_log_info, vha, 0xfffe,
	    fmt: "Adapter shutdown successfully.\n" );
}
3768 | |
/* Deletes all the virtual ports for a given ha */
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vha;
	unsigned long flags;

	mutex_lock(&ha->vport_lock);
	while (ha->cur_vport_count) {
		spin_lock_irqsave(&ha->vport_slock, flags);

		BUG_ON(base_vha->list.next == &ha->vp_list);
		/* This assumes first entry in ha->vp_list is always base vha */
		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
		/* Hold the host so it cannot go away once the locks drop. */
		scsi_host_get(vha->host);

		/*
		 * fc_vport_terminate() can sleep, so both the spinlock and
		 * the vport mutex must be released before calling it.
		 */
		spin_unlock_irqrestore(lock: &ha->vport_slock, flags);
		mutex_unlock(lock: &ha->vport_lock);

		qla_nvme_delete(vha);

		fc_vport_terminate(vport: vha->fc_vport);
		scsi_host_put(t: vha->host);

		/* Re-acquire before re-testing cur_vport_count. */
		mutex_lock(&ha->vport_lock);
	}
	mutex_unlock(lock: &ha->vport_lock);
}
3797 | |
3798 | /* Stops all deferred work threads */ |
3799 | static void |
3800 | qla2x00_destroy_deferred_work(struct qla_hw_data *ha) |
3801 | { |
3802 | /* Cancel all work and destroy DPC workqueues */ |
3803 | if (ha->dpc_lp_wq) { |
3804 | cancel_work_sync(work: &ha->idc_aen); |
3805 | destroy_workqueue(wq: ha->dpc_lp_wq); |
3806 | ha->dpc_lp_wq = NULL; |
3807 | } |
3808 | |
3809 | if (ha->dpc_hp_wq) { |
3810 | cancel_work_sync(work: &ha->nic_core_reset); |
3811 | cancel_work_sync(work: &ha->idc_state_handler); |
3812 | cancel_work_sync(work: &ha->nic_core_unrecoverable); |
3813 | destroy_workqueue(wq: ha->dpc_hp_wq); |
3814 | ha->dpc_hp_wq = NULL; |
3815 | } |
3816 | |
3817 | /* Kill the kernel thread for this host */ |
3818 | if (ha->dpc_thread) { |
3819 | struct task_struct *t = ha->dpc_thread; |
3820 | |
3821 | /* |
3822 | * qla2xxx_wake_dpc checks for ->dpc_thread |
3823 | * so we need to zero it out. |
3824 | */ |
3825 | ha->dpc_thread = NULL; |
3826 | kthread_stop(k: t); |
3827 | } |
3828 | } |
3829 | |
3830 | static void |
3831 | qla2x00_unmap_iobases(struct qla_hw_data *ha) |
3832 | { |
3833 | if (IS_QLA82XX(ha)) { |
3834 | |
3835 | iounmap(addr: (device_reg_t *)ha->nx_pcibase); |
3836 | if (!ql2xdbwr) |
3837 | iounmap(addr: (device_reg_t *)ha->nxdb_wr_ptr); |
3838 | } else { |
3839 | if (ha->iobase) |
3840 | iounmap(addr: ha->iobase); |
3841 | |
3842 | if (ha->cregbase) |
3843 | iounmap(addr: ha->cregbase); |
3844 | |
3845 | if (ha->mqiobase) |
3846 | iounmap(addr: ha->mqiobase); |
3847 | |
3848 | if (ha->msixbase) |
3849 | iounmap(addr: ha->msixbase); |
3850 | } |
3851 | } |
3852 | |
/*
 * Clear this driver's presence bit in the IDC DRV_ACTIVE register so
 * peer functions know we are gone.  Only P3P-class parts (8044/82xx)
 * participate in the IDC protocol; others are a no-op.
 */
static void
qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
	if (IS_QLA8044(ha)) {
		qla8044_idc_lock(ha);
		qla8044_clear_drv_active(ha);
		qla8044_idc_unlock(ha);
		return;
	}

	if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
	}
}
3866 | |
/*
 * PCI ->remove() callback: tear down one adapter completely.  Undoes
 * probe in reverse: deletes vports, stops firmware, timer and deferred
 * work, unregisters from the SCSI/FC midlayers, and releases all
 * driver-owned memory and PCI resources.  Order here matters; do not
 * reorder steps without checking what each later step still touches.
 */
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;

	base_vha = pci_get_drvdata(pdev);
	ha = base_vha->hw;
	ql_log(ql_log_info, vha: base_vha, 0xb079,
	    fmt: "Removing driver\n" );
	/* Block any further board_disable work, then drain a pending one. */
	__qla_set_remove_flag(base_vha);
	cancel_work_sync(work: &ha->board_disable);

	/*
	 * If the PCI device is disabled then there was a PCI-disconnect and
	 * qla2x00_disable_board_on_pci_error has taken care of most of the
	 * resources.
	 */
	if (!atomic_read(v: &pdev->enable_cnt)) {
		dma_free_coherent(dev: &ha->pdev->dev, size: base_vha->gnl.size,
		    cpu_addr: base_vha->gnl.l, dma_handle: base_vha->gnl.ldma);
		base_vha->gnl.l = NULL;
		scsi_host_put(t: base_vha->host);
		kfree(objp: ha);
		pci_set_drvdata(pdev, NULL);
		return;
	}
	qla2x00_wait_for_hba_ready(vha: base_vha);

	/*
	 * if UNLOADING flag is already set, then continue unload,
	 * where it was set first.
	 */
	if (test_and_set_bit(UNLOADING, addr: &base_vha->dpc_flags))
		return;

	/* Newer ISPs get a full cleanup; older FC parts just stop firmware. */
	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		if (ha->flags.fw_started)
			qla2x00_abort_isp_cleanup(base_vha);
	} else if (!IS_QLAFX00(ha)) {
		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb07e,
			    fmt: "Clearing fcoe driver presence.\n" );
			if (qla83xx_clear_drv_presence(vha: base_vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb079,
				    fmt: "Error while clearing DRV-Presence.\n" );
		}

		qla2x00_try_to_stop_firmware(base_vha);
	}

	qla2x00_wait_for_sess_deletion(vha: base_vha);

	qla_nvme_delete(base_vha);

	/* Free the Get-Name-List DMA buffer. */
	dma_free_coherent(dev: &ha->pdev->dev,
	    size: base_vha->gnl.size, cpu_addr: base_vha->gnl.l, dma_handle: base_vha->gnl.ldma);

	base_vha->gnl.l = NULL;
	/* Stop edif event-node and database machinery. */
	qla_enode_stop(vha: base_vha);
	qla_edb_stop(vha: base_vha);

	vfree(addr: base_vha->scan.l);

	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(base_vha, 20);

	qla2x00_delete_all_vps(ha, base_vha);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	/* Disable timer */
	if (base_vha->timer_active)
		qla2x00_stop_timer(vha: base_vha);

	base_vha->flags.online = 0;

	/* free DMA memory */
	if (ha->exlogin_buf)
		qla2x00_free_exlogin_buffer(ha);

	/* free DMA memory */
	if (ha->exchoffld_buf)
		qla2x00_free_exchoffld_buffer(ha);

	/* Workqueues and DPC thread must be gone before the target mode. */
	qla2x00_destroy_deferred_work(ha);

	qlt_remove_target(ha, base_vha);

	qla2x00_free_sysfs_attr(base_vha, true);

	/* Unregister from the FC transport, then from the SCSI midlayer. */
	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	qla2x00_free_device(base_vha);

	qla2x00_clear_drv_active(ha);

	scsi_host_put(t: base_vha->host);

	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(objp: ha);

	pci_disable_device(dev: pdev);
}
3978 | |
3979 | static inline void |
3980 | qla24xx_free_purex_list(struct purex_list *list) |
3981 | { |
3982 | struct purex_item *item, *next; |
3983 | ulong flags; |
3984 | |
3985 | spin_lock_irqsave(&list->lock, flags); |
3986 | list_for_each_entry_safe(item, next, &list->head, list) { |
3987 | list_del(entry: &item->list); |
3988 | if (item == &item->vha->default_item) |
3989 | continue; |
3990 | kfree(objp: item); |
3991 | } |
3992 | spin_unlock_irqrestore(lock: &list->lock, flags); |
3993 | } |
3994 | |
/*
 * Release all per-adapter runtime resources: outstanding commands,
 * timer, queues, interrupts, workqueue, fcports and DMA memory.
 * Called from remove_one and from probe's failure path; the hardware
 * is expected to be quiesced by the caller.
 */
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Fail every outstanding command back up to the midlayer. */
	qla2x00_abort_all_cmds(vha, res: DID_NO_CONNECT << 16);

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	qla25xx_delete_queues(vha);
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_fcports(vha);

	qla2x00_free_irqs(vha);

	/* Flush the work queue and remove it */
	if (ha->wq) {
		destroy_workqueue(wq: ha->wq);
		ha->wq = NULL;
	}


	qla24xx_free_purex_list(list: &vha->purex_list);

	qla2x00_mem_free(ha);

	qla82xx_md_free(vha);

	/* Release edif security-association database resources. */
	qla_edif_sadb_release_free_pool(ha);
	qla_edif_sadb_release(ha);

	/* Queues last: earlier steps may still reference req/rsp rings. */
	qla2x00_free_queues(ha);
}
4037 | |
4038 | void qla2x00_free_fcports(struct scsi_qla_host *vha) |
4039 | { |
4040 | fc_port_t *fcport, *tfcport; |
4041 | |
4042 | list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) |
4043 | qla2x00_free_fcport(fcport); |
4044 | } |
4045 | |
4046 | static inline void |
4047 | qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) |
4048 | { |
4049 | int now; |
4050 | |
4051 | if (!fcport->rport) |
4052 | return; |
4053 | |
4054 | if (fcport->rport) { |
4055 | ql_dbg(ql_dbg_disc, vha: fcport->vha, 0x2109, |
4056 | fmt: "%s %8phN. rport %p roles %x\n" , |
4057 | __func__, fcport->port_name, fcport->rport, |
4058 | fcport->rport->roles); |
4059 | fc_remote_port_delete(rport: fcport->rport); |
4060 | } |
4061 | qlt_do_generation_tick(vha, &now); |
4062 | } |
4063 | |
4064 | /* |
4065 | * qla2x00_mark_device_lost Updates fcport state when device goes offline. |
4066 | * |
4067 | * Input: ha = adapter block pointer. fcport = port structure pointer. |
4068 | * |
4069 | * Return: None. |
4070 | * |
4071 | * Context: |
4072 | */ |
4073 | void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, |
4074 | int do_login) |
4075 | { |
4076 | if (IS_QLAFX00(vha->hw)) { |
4077 | qla2x00_set_fcport_state(fcport, state: FCS_DEVICE_LOST); |
4078 | qla2x00_schedule_rport_del(vha, fcport); |
4079 | return; |
4080 | } |
4081 | |
4082 | if (atomic_read(v: &fcport->state) == FCS_ONLINE && |
4083 | vha->vp_idx == fcport->vha->vp_idx) { |
4084 | qla2x00_set_fcport_state(fcport, state: FCS_DEVICE_LOST); |
4085 | qla2x00_schedule_rport_del(vha, fcport); |
4086 | } |
4087 | |
4088 | /* |
4089 | * We may need to retry the login, so don't change the state of the |
4090 | * port but do the retries. |
4091 | */ |
4092 | if (atomic_read(v: &fcport->state) != FCS_DEVICE_DEAD) |
4093 | qla2x00_set_fcport_state(fcport, state: FCS_DEVICE_LOST); |
4094 | |
4095 | if (!do_login) |
4096 | return; |
4097 | |
4098 | set_bit(RELOGIN_NEEDED, addr: &vha->dpc_flags); |
4099 | } |
4100 | |
4101 | void |
4102 | qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) |
4103 | { |
4104 | fc_port_t *fcport; |
4105 | |
4106 | ql_dbg(ql_dbg_disc, vha, 0x20f1, |
4107 | fmt: "Mark all dev lost\n" ); |
4108 | |
4109 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
4110 | if (ql2xfc2target && |
4111 | fcport->loop_id != FC_NO_LOOP_ID && |
4112 | (fcport->flags & FCF_FCP2_DEVICE) && |
4113 | fcport->port_type == FCT_TARGET && |
4114 | !qla2x00_reset_active(vha)) { |
4115 | ql_dbg(ql_dbg_disc, vha, 0x211a, |
4116 | fmt: "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC" , |
4117 | fcport->flags, fcport->port_type, |
4118 | fcport->d_id.b24, fcport->port_name); |
4119 | continue; |
4120 | } |
4121 | fcport->scan_state = 0; |
4122 | qlt_schedule_sess_for_deletion(fcport); |
4123 | } |
4124 | } |
4125 | |
4126 | static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) |
4127 | { |
4128 | int i; |
4129 | |
4130 | if (IS_FWI2_CAPABLE(ha)) |
4131 | return; |
4132 | |
4133 | for (i = 0; i < SNS_FIRST_LOOP_ID; i++) |
4134 | set_bit(nr: i, addr: ha->loop_id_map); |
4135 | set_bit(MANAGEMENT_SERVER, addr: ha->loop_id_map); |
4136 | set_bit(BROADCAST, addr: ha->loop_id_map); |
4137 | } |
4138 | |
4139 | /* |
4140 | * qla2x00_mem_alloc |
4141 | * Allocates adapter memory. |
4142 | * |
4143 | * Returns: |
4144 | * 0 = success. |
4145 | * !0 = failure. |
4146 | */ |
4147 | static int |
4148 | qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
4149 | struct req_que **req, struct rsp_que **rsp) |
4150 | { |
4151 | char name[16]; |
4152 | int rc; |
4153 | |
4154 | if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { |
4155 | ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, size: sizeof(struct qla_vp_map), GFP_KERNEL); |
4156 | if (!ha->vp_map) |
4157 | goto fail; |
4158 | } |
4159 | |
4160 | ha->init_cb = dma_alloc_coherent(dev: &ha->pdev->dev, size: ha->init_cb_size, |
4161 | dma_handle: &ha->init_cb_dma, GFP_KERNEL); |
4162 | if (!ha->init_cb) |
4163 | goto fail_free_vp_map; |
4164 | |
4165 | rc = btree_init32(head: &ha->host_map); |
4166 | if (rc) |
4167 | goto fail_free_init_cb; |
4168 | |
4169 | if (qlt_mem_alloc(ha) < 0) |
4170 | goto fail_free_btree; |
4171 | |
4172 | ha->gid_list = dma_alloc_coherent(dev: &ha->pdev->dev, |
4173 | size: qla2x00_gid_list_size(ha), dma_handle: &ha->gid_list_dma, GFP_KERNEL); |
4174 | if (!ha->gid_list) |
4175 | goto fail_free_tgt_mem; |
4176 | |
4177 | ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, kc: srb_cachep); |
4178 | if (!ha->srb_mempool) |
4179 | goto fail_free_gid_list; |
4180 | |
4181 | if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { |
4182 | /* Allocate cache for CT6 Ctx. */ |
4183 | if (!ctx_cachep) { |
4184 | ctx_cachep = kmem_cache_create(name: "qla2xxx_ctx" , |
4185 | size: sizeof(struct ct6_dsd), align: 0, |
4186 | SLAB_HWCACHE_ALIGN, NULL); |
4187 | if (!ctx_cachep) |
4188 | goto fail_free_srb_mempool; |
4189 | } |
4190 | ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, |
4191 | kc: ctx_cachep); |
4192 | if (!ha->ctx_mempool) |
4193 | goto fail_free_srb_mempool; |
4194 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0021, |
4195 | fmt: "ctx_cachep=%p ctx_mempool=%p.\n" , |
4196 | ctx_cachep, ha->ctx_mempool); |
4197 | } |
4198 | |
4199 | /* Get memory for cached NVRAM */ |
4200 | ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); |
4201 | if (!ha->nvram) |
4202 | goto fail_free_ctx_mempool; |
4203 | |
4204 | snprintf(buf: name, size: sizeof(name), fmt: "%s_%d" , QLA2XXX_DRIVER_NAME, |
4205 | ha->pdev->device); |
4206 | ha->s_dma_pool = dma_pool_create(name, dev: &ha->pdev->dev, |
4207 | DMA_POOL_SIZE, align: 8, allocation: 0); |
4208 | if (!ha->s_dma_pool) |
4209 | goto fail_free_nvram; |
4210 | |
4211 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0022, |
4212 | fmt: "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n" , |
4213 | ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); |
4214 | |
4215 | if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { |
4216 | ha->dl_dma_pool = dma_pool_create(name, dev: &ha->pdev->dev, |
4217 | DSD_LIST_DMA_POOL_SIZE, align: 8, allocation: 0); |
4218 | if (!ha->dl_dma_pool) { |
4219 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0023, |
4220 | fmt: "Failed to allocate memory for dl_dma_pool.\n" ); |
4221 | goto fail_s_dma_pool; |
4222 | } |
4223 | |
4224 | ha->fcp_cmnd_dma_pool = dma_pool_create(name, dev: &ha->pdev->dev, |
4225 | FCP_CMND_DMA_POOL_SIZE, align: 8, allocation: 0); |
4226 | if (!ha->fcp_cmnd_dma_pool) { |
4227 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0024, |
4228 | fmt: "Failed to allocate memory for fcp_cmnd_dma_pool.\n" ); |
4229 | goto fail_dl_dma_pool; |
4230 | } |
4231 | |
4232 | if (ql2xenabledif) { |
4233 | u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; |
4234 | struct dsd_dma *dsd, *nxt; |
4235 | uint i; |
4236 | /* Creata a DMA pool of buffers for DIF bundling */ |
4237 | ha->dif_bundl_pool = dma_pool_create(name, |
4238 | dev: &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, align: 8, allocation: 0); |
4239 | if (!ha->dif_bundl_pool) { |
4240 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0024, |
4241 | fmt: "%s: failed create dif_bundl_pool\n" , |
4242 | __func__); |
4243 | goto fail_dif_bundl_dma_pool; |
4244 | } |
4245 | |
4246 | INIT_LIST_HEAD(list: &ha->pool.good.head); |
4247 | INIT_LIST_HEAD(list: &ha->pool.unusable.head); |
4248 | ha->pool.good.count = 0; |
4249 | ha->pool.unusable.count = 0; |
4250 | for (i = 0; i < 128; i++) { |
4251 | dsd = kzalloc(size: sizeof(*dsd), GFP_ATOMIC); |
4252 | if (!dsd) { |
4253 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, |
4254 | 0xe0ee, fmt: "%s: failed alloc dsd\n" , |
4255 | __func__); |
4256 | return -ENOMEM; |
4257 | } |
4258 | ha->dif_bundle_kallocs++; |
4259 | |
4260 | dsd->dsd_addr = dma_pool_alloc( |
4261 | pool: ha->dif_bundl_pool, GFP_ATOMIC, |
4262 | handle: &dsd->dsd_list_dma); |
4263 | if (!dsd->dsd_addr) { |
4264 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, |
4265 | 0xe0ee, |
4266 | fmt: "%s: failed alloc ->dsd_addr\n" , |
4267 | __func__); |
4268 | kfree(objp: dsd); |
4269 | ha->dif_bundle_kallocs--; |
4270 | continue; |
4271 | } |
4272 | ha->dif_bundle_dma_allocs++; |
4273 | |
4274 | /* |
4275 | * if DMA buffer crosses 4G boundary, |
4276 | * put it on bad list |
4277 | */ |
4278 | if (MSD(dsd->dsd_list_dma) ^ |
4279 | MSD(dsd->dsd_list_dma + bufsize)) { |
4280 | list_add_tail(new: &dsd->list, |
4281 | head: &ha->pool.unusable.head); |
4282 | ha->pool.unusable.count++; |
4283 | } else { |
4284 | list_add_tail(new: &dsd->list, |
4285 | head: &ha->pool.good.head); |
4286 | ha->pool.good.count++; |
4287 | } |
4288 | } |
4289 | |
4290 | /* return the good ones back to the pool */ |
4291 | list_for_each_entry_safe(dsd, nxt, |
4292 | &ha->pool.good.head, list) { |
4293 | list_del(entry: &dsd->list); |
4294 | dma_pool_free(pool: ha->dif_bundl_pool, |
4295 | vaddr: dsd->dsd_addr, addr: dsd->dsd_list_dma); |
4296 | ha->dif_bundle_dma_allocs--; |
4297 | kfree(objp: dsd); |
4298 | ha->dif_bundle_kallocs--; |
4299 | } |
4300 | |
4301 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0024, |
4302 | fmt: "%s: dif dma pool (good=%u unusable=%u)\n" , |
4303 | __func__, ha->pool.good.count, |
4304 | ha->pool.unusable.count); |
4305 | } |
4306 | |
4307 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0025, |
4308 | fmt: "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n" , |
4309 | ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, |
4310 | ha->dif_bundl_pool); |
4311 | } |
4312 | |
4313 | /* Allocate memory for SNS commands */ |
4314 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) { |
4315 | /* Get consistent memory allocated for SNS commands */ |
4316 | ha->sns_cmd = dma_alloc_coherent(dev: &ha->pdev->dev, |
4317 | size: sizeof(struct sns_cmd_pkt), dma_handle: &ha->sns_cmd_dma, GFP_KERNEL); |
4318 | if (!ha->sns_cmd) |
4319 | goto fail_dma_pool; |
4320 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0026, |
4321 | fmt: "sns_cmd: %p.\n" , ha->sns_cmd); |
4322 | } else { |
4323 | /* Get consistent memory allocated for MS IOCB */ |
4324 | ha->ms_iocb = dma_pool_alloc(pool: ha->s_dma_pool, GFP_KERNEL, |
4325 | handle: &ha->ms_iocb_dma); |
4326 | if (!ha->ms_iocb) |
4327 | goto fail_dma_pool; |
4328 | /* Get consistent memory allocated for CT SNS commands */ |
4329 | ha->ct_sns = dma_alloc_coherent(dev: &ha->pdev->dev, |
4330 | size: sizeof(struct ct_sns_pkt), dma_handle: &ha->ct_sns_dma, GFP_KERNEL); |
4331 | if (!ha->ct_sns) |
4332 | goto fail_free_ms_iocb; |
4333 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0027, |
4334 | fmt: "ms_iocb=%p ct_sns=%p.\n" , |
4335 | ha->ms_iocb, ha->ct_sns); |
4336 | } |
4337 | |
4338 | /* Allocate memory for request ring */ |
4339 | *req = kzalloc(size: sizeof(struct req_que), GFP_KERNEL); |
4340 | if (!*req) { |
4341 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0028, |
4342 | fmt: "Failed to allocate memory for req.\n" ); |
4343 | goto fail_req; |
4344 | } |
4345 | (*req)->length = req_len; |
4346 | (*req)->ring = dma_alloc_coherent(dev: &ha->pdev->dev, |
4347 | size: ((*req)->length + 1) * sizeof(request_t), |
4348 | dma_handle: &(*req)->dma, GFP_KERNEL); |
4349 | if (!(*req)->ring) { |
4350 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0029, |
4351 | fmt: "Failed to allocate memory for req_ring.\n" ); |
4352 | goto fail_req_ring; |
4353 | } |
4354 | /* Allocate memory for response ring */ |
4355 | *rsp = kzalloc(size: sizeof(struct rsp_que), GFP_KERNEL); |
4356 | if (!*rsp) { |
4357 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x002a, |
4358 | fmt: "Failed to allocate memory for rsp.\n" ); |
4359 | goto fail_rsp; |
4360 | } |
4361 | (*rsp)->hw = ha; |
4362 | (*rsp)->length = rsp_len; |
4363 | (*rsp)->ring = dma_alloc_coherent(dev: &ha->pdev->dev, |
4364 | size: ((*rsp)->length + 1) * sizeof(response_t), |
4365 | dma_handle: &(*rsp)->dma, GFP_KERNEL); |
4366 | if (!(*rsp)->ring) { |
4367 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x002b, |
4368 | fmt: "Failed to allocate memory for rsp_ring.\n" ); |
4369 | goto fail_rsp_ring; |
4370 | } |
4371 | (*req)->rsp = *rsp; |
4372 | (*rsp)->req = *req; |
4373 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x002c, |
4374 | fmt: "req=%p req->length=%d req->ring=%p rsp=%p " |
4375 | "rsp->length=%d rsp->ring=%p.\n" , |
4376 | *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, |
4377 | (*rsp)->ring); |
4378 | /* Allocate memory for NVRAM data for vports */ |
4379 | if (ha->nvram_npiv_size) { |
4380 | ha->npiv_info = kcalloc(n: ha->nvram_npiv_size, |
4381 | size: sizeof(struct qla_npiv_entry), |
4382 | GFP_KERNEL); |
4383 | if (!ha->npiv_info) { |
4384 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x002d, |
4385 | fmt: "Failed to allocate memory for npiv_info.\n" ); |
4386 | goto fail_npiv_info; |
4387 | } |
4388 | } else |
4389 | ha->npiv_info = NULL; |
4390 | |
4391 | /* Get consistent memory allocated for EX-INIT-CB. */ |
4392 | if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || |
4393 | IS_QLA28XX(ha)) { |
4394 | ha->ex_init_cb = dma_pool_alloc(pool: ha->s_dma_pool, GFP_KERNEL, |
4395 | handle: &ha->ex_init_cb_dma); |
4396 | if (!ha->ex_init_cb) |
4397 | goto fail_ex_init_cb; |
4398 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x002e, |
4399 | fmt: "ex_init_cb=%p.\n" , ha->ex_init_cb); |
4400 | } |
4401 | |
4402 | /* Get consistent memory allocated for Special Features-CB. */ |
4403 | if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { |
4404 | ha->sf_init_cb = dma_pool_zalloc(pool: ha->s_dma_pool, GFP_KERNEL, |
4405 | handle: &ha->sf_init_cb_dma); |
4406 | if (!ha->sf_init_cb) |
4407 | goto fail_sf_init_cb; |
4408 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0199, |
4409 | fmt: "sf_init_cb=%p.\n" , ha->sf_init_cb); |
4410 | } |
4411 | |
4412 | |
4413 | /* Get consistent memory allocated for Async Port-Database. */ |
4414 | if (!IS_FWI2_CAPABLE(ha)) { |
4415 | ha->async_pd = dma_pool_alloc(pool: ha->s_dma_pool, GFP_KERNEL, |
4416 | handle: &ha->async_pd_dma); |
4417 | if (!ha->async_pd) |
4418 | goto fail_async_pd; |
4419 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x002f, |
4420 | fmt: "async_pd=%p.\n" , ha->async_pd); |
4421 | } |
4422 | |
4423 | INIT_LIST_HEAD(list: &ha->vp_list); |
4424 | |
4425 | /* Allocate memory for our loop_id bitmap */ |
4426 | ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), |
4427 | size: sizeof(long), |
4428 | GFP_KERNEL); |
4429 | if (!ha->loop_id_map) |
4430 | goto fail_loop_id_map; |
4431 | else { |
4432 | qla2x00_set_reserved_loop_ids(ha); |
4433 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x0123, |
4434 | fmt: "loop_id_map=%p.\n" , ha->loop_id_map); |
4435 | } |
4436 | |
4437 | ha->sfp_data = dma_alloc_coherent(dev: &ha->pdev->dev, |
4438 | SFP_DEV_SIZE, dma_handle: &ha->sfp_data_dma, GFP_KERNEL); |
4439 | if (!ha->sfp_data) { |
4440 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x011b, |
4441 | fmt: "Unable to allocate memory for SFP read-data.\n" ); |
4442 | goto fail_sfp_data; |
4443 | } |
4444 | |
4445 | ha->flt = dma_alloc_coherent(dev: &ha->pdev->dev, |
4446 | size: sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, dma_handle: &ha->flt_dma, |
4447 | GFP_KERNEL); |
4448 | if (!ha->flt) { |
4449 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x011b, |
4450 | fmt: "Unable to allocate memory for FLT.\n" ); |
4451 | goto fail_flt_buffer; |
4452 | } |
4453 | |
4454 | /* allocate the purex dma pool */ |
4455 | ha->purex_dma_pool = dma_pool_create(name, dev: &ha->pdev->dev, |
4456 | ELS_MAX_PAYLOAD, align: 8, allocation: 0); |
4457 | |
4458 | if (!ha->purex_dma_pool) { |
4459 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0x011b, |
4460 | fmt: "Unable to allocate purex_dma_pool.\n" ); |
4461 | goto fail_flt; |
4462 | } |
4463 | |
4464 | ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; |
4465 | ha->elsrej.c = dma_alloc_coherent(dev: &ha->pdev->dev, |
4466 | size: ha->elsrej.size, |
4467 | dma_handle: &ha->elsrej.cdma, |
4468 | GFP_KERNEL); |
4469 | if (!ha->elsrej.c) { |
4470 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0xffff, |
4471 | fmt: "Alloc failed for els reject cmd.\n" ); |
4472 | goto fail_elsrej; |
4473 | } |
4474 | ha->elsrej.c->er_cmd = ELS_LS_RJT; |
4475 | ha->elsrej.c->er_reason = ELS_RJT_LOGIC; |
4476 | ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; |
4477 | |
4478 | ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt); |
4479 | ha->lsrjt.c = dma_alloc_coherent(dev: &ha->pdev->dev, size: ha->lsrjt.size, |
4480 | dma_handle: &ha->lsrjt.cdma, GFP_KERNEL); |
4481 | if (!ha->lsrjt.c) { |
4482 | ql_dbg_pci(ql_dbg_init, pdev: ha->pdev, 0xffff, |
4483 | fmt: "Alloc failed for nvme fc reject cmd.\n" ); |
4484 | goto fail_lsrjt; |
4485 | } |
4486 | |
4487 | return 0; |
4488 | |
4489 | fail_lsrjt: |
4490 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->elsrej.size, |
4491 | cpu_addr: ha->elsrej.c, dma_handle: ha->elsrej.cdma); |
4492 | fail_elsrej: |
4493 | dma_pool_destroy(pool: ha->purex_dma_pool); |
4494 | fail_flt: |
4495 | dma_free_coherent(dev: &ha->pdev->dev, SFP_DEV_SIZE, |
4496 | cpu_addr: ha->flt, dma_handle: ha->flt_dma); |
4497 | |
4498 | fail_flt_buffer: |
4499 | dma_free_coherent(dev: &ha->pdev->dev, SFP_DEV_SIZE, |
4500 | cpu_addr: ha->sfp_data, dma_handle: ha->sfp_data_dma); |
4501 | fail_sfp_data: |
4502 | kfree(objp: ha->loop_id_map); |
4503 | fail_loop_id_map: |
4504 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->async_pd, addr: ha->async_pd_dma); |
4505 | fail_async_pd: |
4506 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->sf_init_cb, addr: ha->sf_init_cb_dma); |
4507 | fail_sf_init_cb: |
4508 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->ex_init_cb, addr: ha->ex_init_cb_dma); |
4509 | fail_ex_init_cb: |
4510 | kfree(objp: ha->npiv_info); |
4511 | fail_npiv_info: |
4512 | dma_free_coherent(dev: &ha->pdev->dev, size: ((*rsp)->length + 1) * |
4513 | sizeof(response_t), cpu_addr: (*rsp)->ring, dma_handle: (*rsp)->dma); |
4514 | (*rsp)->ring = NULL; |
4515 | (*rsp)->dma = 0; |
4516 | fail_rsp_ring: |
4517 | kfree(objp: *rsp); |
4518 | *rsp = NULL; |
4519 | fail_rsp: |
4520 | dma_free_coherent(dev: &ha->pdev->dev, size: ((*req)->length + 1) * |
4521 | sizeof(request_t), cpu_addr: (*req)->ring, dma_handle: (*req)->dma); |
4522 | (*req)->ring = NULL; |
4523 | (*req)->dma = 0; |
4524 | fail_req_ring: |
4525 | kfree(objp: *req); |
4526 | *req = NULL; |
4527 | fail_req: |
4528 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(struct ct_sns_pkt), |
4529 | cpu_addr: ha->ct_sns, dma_handle: ha->ct_sns_dma); |
4530 | ha->ct_sns = NULL; |
4531 | ha->ct_sns_dma = 0; |
4532 | fail_free_ms_iocb: |
4533 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->ms_iocb, addr: ha->ms_iocb_dma); |
4534 | ha->ms_iocb = NULL; |
4535 | ha->ms_iocb_dma = 0; |
4536 | |
4537 | if (ha->sns_cmd) |
4538 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(struct sns_cmd_pkt), |
4539 | cpu_addr: ha->sns_cmd, dma_handle: ha->sns_cmd_dma); |
4540 | fail_dma_pool: |
4541 | if (ql2xenabledif) { |
4542 | struct dsd_dma *dsd, *nxt; |
4543 | |
4544 | list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, |
4545 | list) { |
4546 | list_del(entry: &dsd->list); |
4547 | dma_pool_free(pool: ha->dif_bundl_pool, vaddr: dsd->dsd_addr, |
4548 | addr: dsd->dsd_list_dma); |
4549 | ha->dif_bundle_dma_allocs--; |
4550 | kfree(objp: dsd); |
4551 | ha->dif_bundle_kallocs--; |
4552 | ha->pool.unusable.count--; |
4553 | } |
4554 | dma_pool_destroy(pool: ha->dif_bundl_pool); |
4555 | ha->dif_bundl_pool = NULL; |
4556 | } |
4557 | |
4558 | fail_dif_bundl_dma_pool: |
4559 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
4560 | dma_pool_destroy(pool: ha->fcp_cmnd_dma_pool); |
4561 | ha->fcp_cmnd_dma_pool = NULL; |
4562 | } |
4563 | fail_dl_dma_pool: |
4564 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
4565 | dma_pool_destroy(pool: ha->dl_dma_pool); |
4566 | ha->dl_dma_pool = NULL; |
4567 | } |
4568 | fail_s_dma_pool: |
4569 | dma_pool_destroy(pool: ha->s_dma_pool); |
4570 | ha->s_dma_pool = NULL; |
4571 | fail_free_nvram: |
4572 | kfree(objp: ha->nvram); |
4573 | ha->nvram = NULL; |
4574 | fail_free_ctx_mempool: |
4575 | mempool_destroy(pool: ha->ctx_mempool); |
4576 | ha->ctx_mempool = NULL; |
4577 | fail_free_srb_mempool: |
4578 | mempool_destroy(pool: ha->srb_mempool); |
4579 | ha->srb_mempool = NULL; |
4580 | fail_free_gid_list: |
4581 | dma_free_coherent(dev: &ha->pdev->dev, size: qla2x00_gid_list_size(ha), |
4582 | cpu_addr: ha->gid_list, |
4583 | dma_handle: ha->gid_list_dma); |
4584 | ha->gid_list = NULL; |
4585 | ha->gid_list_dma = 0; |
4586 | fail_free_tgt_mem: |
4587 | qlt_mem_free(ha); |
4588 | fail_free_btree: |
4589 | btree_destroy32(head: &ha->host_map); |
4590 | fail_free_init_cb: |
4591 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->init_cb_size, cpu_addr: ha->init_cb, |
4592 | dma_handle: ha->init_cb_dma); |
4593 | ha->init_cb = NULL; |
4594 | ha->init_cb_dma = 0; |
4595 | fail_free_vp_map: |
4596 | kfree(objp: ha->vp_map); |
4597 | fail: |
4598 | ql_log(ql_log_fatal, NULL, 0x0030, |
4599 | fmt: "Memory allocation failure.\n" ); |
4600 | return -ENOMEM; |
4601 | } |
4602 | |
4603 | int |
4604 | qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) |
4605 | { |
4606 | int rval; |
4607 | uint16_t size, max_cnt; |
4608 | uint32_t temp; |
4609 | struct qla_hw_data *ha = vha->hw; |
4610 | |
4611 | /* Return if we don't need to alloacate any extended logins */ |
4612 | if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400) |
4613 | return QLA_SUCCESS; |
4614 | |
4615 | if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) |
4616 | return QLA_SUCCESS; |
4617 | |
4618 | ql_log(ql_log_info, vha, 0xd021, fmt: "EXLOGIN count: %d.\n" , ql2xexlogins); |
4619 | max_cnt = 0; |
4620 | rval = qla_get_exlogin_status(vha, &size, &max_cnt); |
4621 | if (rval != QLA_SUCCESS) { |
4622 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0xd029, |
4623 | fmt: "Failed to get exlogin status.\n" ); |
4624 | return rval; |
4625 | } |
4626 | |
4627 | temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; |
4628 | temp *= size; |
4629 | |
4630 | if (temp != ha->exlogin_size) { |
4631 | qla2x00_free_exlogin_buffer(ha); |
4632 | ha->exlogin_size = temp; |
4633 | |
4634 | ql_log(ql_log_info, vha, 0xd024, |
4635 | fmt: "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n" , |
4636 | max_cnt, size, temp); |
4637 | |
4638 | ql_log(ql_log_info, vha, 0xd025, |
4639 | fmt: "EXLOGIN: requested size=0x%x\n" , ha->exlogin_size); |
4640 | |
4641 | /* Get consistent memory for extended logins */ |
4642 | ha->exlogin_buf = dma_alloc_coherent(dev: &ha->pdev->dev, |
4643 | size: ha->exlogin_size, dma_handle: &ha->exlogin_buf_dma, GFP_KERNEL); |
4644 | if (!ha->exlogin_buf) { |
4645 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0xd02a, |
4646 | fmt: "Failed to allocate memory for exlogin_buf_dma.\n" ); |
4647 | return -ENOMEM; |
4648 | } |
4649 | } |
4650 | |
4651 | /* Now configure the dma buffer */ |
4652 | rval = qla_set_exlogin_mem_cfg(vha, phys_addr: ha->exlogin_buf_dma); |
4653 | if (rval) { |
4654 | ql_log(ql_log_fatal, vha, 0xd033, |
4655 | fmt: "Setup extended login buffer ****FAILED****.\n" ); |
4656 | qla2x00_free_exlogin_buffer(ha); |
4657 | } |
4658 | |
4659 | return rval; |
4660 | } |
4661 | |
4662 | /* |
4663 | * qla2x00_free_exlogin_buffer |
4664 | * |
4665 | * Input: |
4666 | * ha = adapter block pointer |
4667 | */ |
4668 | void |
4669 | qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) |
4670 | { |
4671 | if (ha->exlogin_buf) { |
4672 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->exlogin_size, |
4673 | cpu_addr: ha->exlogin_buf, dma_handle: ha->exlogin_buf_dma); |
4674 | ha->exlogin_buf = NULL; |
4675 | ha->exlogin_size = 0; |
4676 | } |
4677 | } |
4678 | |
4679 | static void |
4680 | qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) |
4681 | { |
4682 | u32 temp; |
4683 | struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; |
4684 | *ret_cnt = FW_DEF_EXCHANGES_CNT; |
4685 | |
4686 | if (max_cnt > vha->hw->max_exchg) |
4687 | max_cnt = vha->hw->max_exchg; |
4688 | |
4689 | if (qla_ini_mode_enabled(ha: vha)) { |
4690 | if (vha->ql2xiniexchg > max_cnt) |
4691 | vha->ql2xiniexchg = max_cnt; |
4692 | |
4693 | if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) |
4694 | *ret_cnt = vha->ql2xiniexchg; |
4695 | |
4696 | } else if (qla_tgt_mode_enabled(ha: vha)) { |
4697 | if (vha->ql2xexchoffld > max_cnt) { |
4698 | vha->ql2xexchoffld = max_cnt; |
4699 | icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); |
4700 | } |
4701 | |
4702 | if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) |
4703 | *ret_cnt = vha->ql2xexchoffld; |
4704 | } else if (qla_dual_mode_enabled(ha: vha)) { |
4705 | temp = vha->ql2xiniexchg + vha->ql2xexchoffld; |
4706 | if (temp > max_cnt) { |
4707 | vha->ql2xiniexchg -= (temp - max_cnt)/2; |
4708 | vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); |
4709 | temp = max_cnt; |
4710 | icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); |
4711 | } |
4712 | |
4713 | if (temp > FW_DEF_EXCHANGES_CNT) |
4714 | *ret_cnt = temp; |
4715 | } |
4716 | } |
4717 | |
4718 | int |
4719 | qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) |
4720 | { |
4721 | int rval; |
4722 | u16 size, max_cnt; |
4723 | u32 actual_cnt, totsz; |
4724 | struct qla_hw_data *ha = vha->hw; |
4725 | |
4726 | if (!ha->flags.exchoffld_enabled) |
4727 | return QLA_SUCCESS; |
4728 | |
4729 | if (!IS_EXCHG_OFFLD_CAPABLE(ha)) |
4730 | return QLA_SUCCESS; |
4731 | |
4732 | max_cnt = 0; |
4733 | rval = qla_get_exchoffld_status(vha, &size, &max_cnt); |
4734 | if (rval != QLA_SUCCESS) { |
4735 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0xd012, |
4736 | fmt: "Failed to get exlogin status.\n" ); |
4737 | return rval; |
4738 | } |
4739 | |
4740 | qla2x00_number_of_exch(vha, ret_cnt: &actual_cnt, max_cnt); |
4741 | ql_log(ql_log_info, vha, 0xd014, |
4742 | fmt: "Actual exchange offload count: %d.\n" , actual_cnt); |
4743 | |
4744 | totsz = actual_cnt * size; |
4745 | |
4746 | if (totsz != ha->exchoffld_size) { |
4747 | qla2x00_free_exchoffld_buffer(ha); |
4748 | if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { |
4749 | ha->exchoffld_size = 0; |
4750 | ha->flags.exchoffld_enabled = 0; |
4751 | return QLA_SUCCESS; |
4752 | } |
4753 | |
4754 | ha->exchoffld_size = totsz; |
4755 | |
4756 | ql_log(ql_log_info, vha, 0xd016, |
4757 | fmt: "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n" , |
4758 | max_cnt, actual_cnt, size, totsz); |
4759 | |
4760 | ql_log(ql_log_info, vha, 0xd017, |
4761 | fmt: "Exchange Buffers requested size = 0x%x\n" , |
4762 | ha->exchoffld_size); |
4763 | |
4764 | /* Get consistent memory for extended logins */ |
4765 | ha->exchoffld_buf = dma_alloc_coherent(dev: &ha->pdev->dev, |
4766 | size: ha->exchoffld_size, dma_handle: &ha->exchoffld_buf_dma, GFP_KERNEL); |
4767 | if (!ha->exchoffld_buf) { |
4768 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0xd013, |
4769 | fmt: "Failed to allocate memory for Exchange Offload.\n" ); |
4770 | |
4771 | if (ha->max_exchg > |
4772 | (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { |
4773 | ha->max_exchg -= REDUCE_EXCHANGES_CNT; |
4774 | } else if (ha->max_exchg > |
4775 | (FW_DEF_EXCHANGES_CNT + 512)) { |
4776 | ha->max_exchg -= 512; |
4777 | } else { |
4778 | ha->flags.exchoffld_enabled = 0; |
4779 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0xd013, |
4780 | fmt: "Disabling Exchange offload due to lack of memory\n" ); |
4781 | } |
4782 | ha->exchoffld_size = 0; |
4783 | |
4784 | return -ENOMEM; |
4785 | } |
4786 | } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { |
4787 | /* pathological case */ |
4788 | qla2x00_free_exchoffld_buffer(ha); |
4789 | ha->exchoffld_size = 0; |
4790 | ha->flags.exchoffld_enabled = 0; |
4791 | ql_log(ql_log_info, vha, 0xd016, |
4792 | fmt: "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n" , |
4793 | ha->exchoffld_size, actual_cnt, size, totsz); |
4794 | return 0; |
4795 | } |
4796 | |
4797 | /* Now configure the dma buffer */ |
4798 | rval = qla_set_exchoffld_mem_cfg(vha); |
4799 | if (rval) { |
4800 | ql_log(ql_log_fatal, vha, 0xd02e, |
4801 | fmt: "Setup exchange offload buffer ****FAILED****.\n" ); |
4802 | qla2x00_free_exchoffld_buffer(ha); |
4803 | } else { |
4804 | /* re-adjust number of target exchange */ |
4805 | struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; |
4806 | |
4807 | if (qla_ini_mode_enabled(ha: vha)) |
4808 | icb->exchange_count = 0; |
4809 | else |
4810 | icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); |
4811 | } |
4812 | |
4813 | return rval; |
4814 | } |
4815 | |
4816 | /* |
4817 | * qla2x00_free_exchoffld_buffer |
4818 | * |
4819 | * Input: |
4820 | * ha = adapter block pointer |
4821 | */ |
4822 | void |
4823 | qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) |
4824 | { |
4825 | if (ha->exchoffld_buf) { |
4826 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->exchoffld_size, |
4827 | cpu_addr: ha->exchoffld_buf, dma_handle: ha->exchoffld_buf_dma); |
4828 | ha->exchoffld_buf = NULL; |
4829 | ha->exchoffld_size = 0; |
4830 | } |
4831 | } |
4832 | |
4833 | /* |
4834 | * qla2x00_free_fw_dump |
4835 | * Frees fw dump stuff. |
4836 | * |
4837 | * Input: |
4838 | * ha = adapter block pointer |
4839 | */ |
4840 | static void |
4841 | qla2x00_free_fw_dump(struct qla_hw_data *ha) |
4842 | { |
4843 | struct fwdt *fwdt = ha->fwdt; |
4844 | uint j; |
4845 | |
4846 | if (ha->fce) |
4847 | dma_free_coherent(dev: &ha->pdev->dev, |
4848 | FCE_SIZE, cpu_addr: ha->fce, dma_handle: ha->fce_dma); |
4849 | |
4850 | if (ha->eft) |
4851 | dma_free_coherent(dev: &ha->pdev->dev, |
4852 | EFT_SIZE, cpu_addr: ha->eft, dma_handle: ha->eft_dma); |
4853 | |
4854 | vfree(addr: ha->fw_dump); |
4855 | |
4856 | ha->fce = NULL; |
4857 | ha->fce_dma = 0; |
4858 | ha->flags.fce_enabled = 0; |
4859 | ha->eft = NULL; |
4860 | ha->eft_dma = 0; |
4861 | ha->fw_dumped = false; |
4862 | ha->fw_dump_cap_flags = 0; |
4863 | ha->fw_dump_reading = 0; |
4864 | ha->fw_dump = NULL; |
4865 | ha->fw_dump_len = 0; |
4866 | |
4867 | for (j = 0; j < 2; j++, fwdt++) { |
4868 | vfree(addr: fwdt->template); |
4869 | fwdt->template = NULL; |
4870 | fwdt->length = 0; |
4871 | } |
4872 | } |
4873 | |
4874 | /* |
4875 | * qla2x00_mem_free |
4876 | * Frees all adapter allocated memory. |
4877 | * |
4878 | * Input: |
4879 | * ha = adapter block pointer. |
4880 | */ |
4881 | static void |
4882 | qla2x00_mem_free(struct qla_hw_data *ha) |
4883 | { |
4884 | qla2x00_free_fw_dump(ha); |
4885 | |
4886 | if (ha->mctp_dump) |
4887 | dma_free_coherent(dev: &ha->pdev->dev, MCTP_DUMP_SIZE, cpu_addr: ha->mctp_dump, |
4888 | dma_handle: ha->mctp_dump_dma); |
4889 | ha->mctp_dump = NULL; |
4890 | |
4891 | mempool_destroy(pool: ha->srb_mempool); |
4892 | ha->srb_mempool = NULL; |
4893 | |
4894 | if (ha->dcbx_tlv) |
4895 | dma_free_coherent(dev: &ha->pdev->dev, DCBX_TLV_DATA_SIZE, |
4896 | cpu_addr: ha->dcbx_tlv, dma_handle: ha->dcbx_tlv_dma); |
4897 | ha->dcbx_tlv = NULL; |
4898 | |
4899 | if (ha->xgmac_data) |
4900 | dma_free_coherent(dev: &ha->pdev->dev, XGMAC_DATA_SIZE, |
4901 | cpu_addr: ha->xgmac_data, dma_handle: ha->xgmac_data_dma); |
4902 | ha->xgmac_data = NULL; |
4903 | |
4904 | if (ha->sns_cmd) |
4905 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(struct sns_cmd_pkt), |
4906 | cpu_addr: ha->sns_cmd, dma_handle: ha->sns_cmd_dma); |
4907 | ha->sns_cmd = NULL; |
4908 | ha->sns_cmd_dma = 0; |
4909 | |
4910 | if (ha->ct_sns) |
4911 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(struct ct_sns_pkt), |
4912 | cpu_addr: ha->ct_sns, dma_handle: ha->ct_sns_dma); |
4913 | ha->ct_sns = NULL; |
4914 | ha->ct_sns_dma = 0; |
4915 | |
4916 | if (ha->sfp_data) |
4917 | dma_free_coherent(dev: &ha->pdev->dev, SFP_DEV_SIZE, cpu_addr: ha->sfp_data, |
4918 | dma_handle: ha->sfp_data_dma); |
4919 | ha->sfp_data = NULL; |
4920 | |
4921 | if (ha->flt) |
4922 | dma_free_coherent(dev: &ha->pdev->dev, |
4923 | size: sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, |
4924 | cpu_addr: ha->flt, dma_handle: ha->flt_dma); |
4925 | ha->flt = NULL; |
4926 | ha->flt_dma = 0; |
4927 | |
4928 | if (ha->ms_iocb) |
4929 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->ms_iocb, addr: ha->ms_iocb_dma); |
4930 | ha->ms_iocb = NULL; |
4931 | ha->ms_iocb_dma = 0; |
4932 | |
4933 | if (ha->sf_init_cb) |
4934 | dma_pool_free(pool: ha->s_dma_pool, |
4935 | vaddr: ha->sf_init_cb, addr: ha->sf_init_cb_dma); |
4936 | |
4937 | if (ha->ex_init_cb) |
4938 | dma_pool_free(pool: ha->s_dma_pool, |
4939 | vaddr: ha->ex_init_cb, addr: ha->ex_init_cb_dma); |
4940 | ha->ex_init_cb = NULL; |
4941 | ha->ex_init_cb_dma = 0; |
4942 | |
4943 | if (ha->async_pd) |
4944 | dma_pool_free(pool: ha->s_dma_pool, vaddr: ha->async_pd, addr: ha->async_pd_dma); |
4945 | ha->async_pd = NULL; |
4946 | ha->async_pd_dma = 0; |
4947 | |
4948 | dma_pool_destroy(pool: ha->s_dma_pool); |
4949 | ha->s_dma_pool = NULL; |
4950 | |
4951 | if (ha->gid_list) |
4952 | dma_free_coherent(dev: &ha->pdev->dev, size: qla2x00_gid_list_size(ha), |
4953 | cpu_addr: ha->gid_list, dma_handle: ha->gid_list_dma); |
4954 | ha->gid_list = NULL; |
4955 | ha->gid_list_dma = 0; |
4956 | |
4957 | if (ha->base_qpair && !list_empty(head: &ha->base_qpair->dsd_list)) { |
4958 | struct dsd_dma *dsd_ptr, *tdsd_ptr; |
4959 | |
4960 | /* clean up allocated prev pool */ |
4961 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, |
4962 | &ha->base_qpair->dsd_list, list) { |
4963 | dma_pool_free(pool: ha->dl_dma_pool, vaddr: dsd_ptr->dsd_addr, |
4964 | addr: dsd_ptr->dsd_list_dma); |
4965 | list_del(entry: &dsd_ptr->list); |
4966 | kfree(objp: dsd_ptr); |
4967 | } |
4968 | } |
4969 | |
4970 | dma_pool_destroy(pool: ha->dl_dma_pool); |
4971 | ha->dl_dma_pool = NULL; |
4972 | |
4973 | dma_pool_destroy(pool: ha->fcp_cmnd_dma_pool); |
4974 | ha->fcp_cmnd_dma_pool = NULL; |
4975 | |
4976 | mempool_destroy(pool: ha->ctx_mempool); |
4977 | ha->ctx_mempool = NULL; |
4978 | |
4979 | if (ql2xenabledif && ha->dif_bundl_pool) { |
4980 | struct dsd_dma *dsd, *nxt; |
4981 | |
4982 | list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, |
4983 | list) { |
4984 | list_del(entry: &dsd->list); |
4985 | dma_pool_free(pool: ha->dif_bundl_pool, vaddr: dsd->dsd_addr, |
4986 | addr: dsd->dsd_list_dma); |
4987 | ha->dif_bundle_dma_allocs--; |
4988 | kfree(objp: dsd); |
4989 | ha->dif_bundle_kallocs--; |
4990 | ha->pool.unusable.count--; |
4991 | } |
4992 | list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { |
4993 | list_del(entry: &dsd->list); |
4994 | dma_pool_free(pool: ha->dif_bundl_pool, vaddr: dsd->dsd_addr, |
4995 | addr: dsd->dsd_list_dma); |
4996 | ha->dif_bundle_dma_allocs--; |
4997 | kfree(objp: dsd); |
4998 | ha->dif_bundle_kallocs--; |
4999 | } |
5000 | } |
5001 | |
5002 | dma_pool_destroy(pool: ha->dif_bundl_pool); |
5003 | ha->dif_bundl_pool = NULL; |
5004 | |
5005 | qlt_mem_free(ha); |
5006 | qla_remove_hostmap(ha); |
5007 | |
5008 | if (ha->init_cb) |
5009 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->init_cb_size, |
5010 | cpu_addr: ha->init_cb, dma_handle: ha->init_cb_dma); |
5011 | |
5012 | dma_pool_destroy(pool: ha->purex_dma_pool); |
5013 | ha->purex_dma_pool = NULL; |
5014 | |
5015 | if (ha->elsrej.c) { |
5016 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->elsrej.size, |
5017 | cpu_addr: ha->elsrej.c, dma_handle: ha->elsrej.cdma); |
5018 | ha->elsrej.c = NULL; |
5019 | } |
5020 | |
5021 | if (ha->lsrjt.c) { |
5022 | dma_free_coherent(dev: &ha->pdev->dev, size: ha->lsrjt.size, cpu_addr: ha->lsrjt.c, |
5023 | dma_handle: ha->lsrjt.cdma); |
5024 | ha->lsrjt.c = NULL; |
5025 | } |
5026 | |
5027 | ha->init_cb = NULL; |
5028 | ha->init_cb_dma = 0; |
5029 | |
5030 | vfree(addr: ha->optrom_buffer); |
5031 | ha->optrom_buffer = NULL; |
5032 | kfree(objp: ha->nvram); |
5033 | ha->nvram = NULL; |
5034 | kfree(objp: ha->npiv_info); |
5035 | ha->npiv_info = NULL; |
5036 | kfree(objp: ha->swl); |
5037 | ha->swl = NULL; |
5038 | kfree(objp: ha->loop_id_map); |
5039 | ha->sf_init_cb = NULL; |
5040 | ha->sf_init_cb_dma = 0; |
5041 | ha->loop_id_map = NULL; |
5042 | |
5043 | kfree(objp: ha->vp_map); |
5044 | ha->vp_map = NULL; |
5045 | } |
5046 | |
5047 | struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht, |
5048 | struct qla_hw_data *ha) |
5049 | { |
5050 | struct Scsi_Host *host; |
5051 | struct scsi_qla_host *vha = NULL; |
5052 | |
5053 | host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); |
5054 | if (!host) { |
5055 | ql_log_pci(ql_log_fatal, pdev: ha->pdev, 0x0107, |
5056 | fmt: "Failed to allocate host from the scsi layer, aborting.\n" ); |
5057 | return NULL; |
5058 | } |
5059 | |
5060 | /* Clear our data area */ |
5061 | vha = shost_priv(shost: host); |
5062 | memset(vha, 0, sizeof(scsi_qla_host_t)); |
5063 | |
5064 | vha->host = host; |
5065 | vha->host_no = host->host_no; |
5066 | vha->hw = ha; |
5067 | |
5068 | vha->qlini_mode = ql2x_ini_mode; |
5069 | vha->ql2xexchoffld = ql2xexchoffld; |
5070 | vha->ql2xiniexchg = ql2xiniexchg; |
5071 | |
5072 | INIT_LIST_HEAD(list: &vha->vp_fcports); |
5073 | INIT_LIST_HEAD(list: &vha->work_list); |
5074 | INIT_LIST_HEAD(list: &vha->list); |
5075 | INIT_LIST_HEAD(list: &vha->qla_cmd_list); |
5076 | INIT_LIST_HEAD(list: &vha->logo_list); |
5077 | INIT_LIST_HEAD(list: &vha->plogi_ack_list); |
5078 | INIT_LIST_HEAD(list: &vha->qp_list); |
5079 | INIT_LIST_HEAD(list: &vha->gnl.fcports); |
5080 | INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); |
5081 | |
5082 | INIT_LIST_HEAD(list: &vha->purex_list.head); |
5083 | spin_lock_init(&vha->purex_list.lock); |
5084 | |
5085 | spin_lock_init(&vha->work_lock); |
5086 | spin_lock_init(&vha->cmd_list_lock); |
5087 | init_waitqueue_head(&vha->fcport_waitQ); |
5088 | init_waitqueue_head(&vha->vref_waitq); |
5089 | qla_enode_init(vha); |
5090 | qla_edb_init(vha); |
5091 | |
5092 | |
5093 | vha->gnl.size = sizeof(struct get_name_list_extended) * |
5094 | (ha->max_loop_id + 1); |
5095 | vha->gnl.l = dma_alloc_coherent(dev: &ha->pdev->dev, |
5096 | size: vha->gnl.size, dma_handle: &vha->gnl.ldma, GFP_KERNEL); |
5097 | if (!vha->gnl.l) { |
5098 | ql_log(ql_log_fatal, vha, 0xd04a, |
5099 | fmt: "Alloc failed for name list.\n" ); |
5100 | scsi_host_put(t: vha->host); |
5101 | return NULL; |
5102 | } |
5103 | |
5104 | /* todo: what about ext login? */ |
5105 | vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); |
5106 | vha->scan.l = vmalloc(size: vha->scan.size); |
5107 | if (!vha->scan.l) { |
5108 | ql_log(ql_log_fatal, vha, 0xd04a, |
5109 | fmt: "Alloc failed for scan database.\n" ); |
5110 | dma_free_coherent(dev: &ha->pdev->dev, size: vha->gnl.size, |
5111 | cpu_addr: vha->gnl.l, dma_handle: vha->gnl.ldma); |
5112 | vha->gnl.l = NULL; |
5113 | scsi_host_put(t: vha->host); |
5114 | return NULL; |
5115 | } |
5116 | INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); |
5117 | |
5118 | snprintf(buf: vha->host_str, size: sizeof(vha->host_str), fmt: "%s_%lu" , |
5119 | QLA2XXX_DRIVER_NAME, vha->host_no); |
5120 | ql_dbg(ql_dbg_init, vha, 0x0041, |
5121 | fmt: "Allocated the host=%p hw=%p vha=%p dev_name=%s" , |
5122 | vha->host, vha->hw, vha, |
5123 | dev_name(dev: &(ha->pdev->dev))); |
5124 | |
5125 | return vha; |
5126 | } |
5127 | |
5128 | struct qla_work_evt * |
5129 | qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) |
5130 | { |
5131 | struct qla_work_evt *e; |
5132 | |
5133 | if (test_bit(UNLOADING, &vha->dpc_flags)) |
5134 | return NULL; |
5135 | |
5136 | if (qla_vha_mark_busy(vha)) |
5137 | return NULL; |
5138 | |
5139 | e = kzalloc(size: sizeof(struct qla_work_evt), GFP_ATOMIC); |
5140 | if (!e) { |
5141 | QLA_VHA_MARK_NOT_BUSY(vha); |
5142 | return NULL; |
5143 | } |
5144 | |
5145 | INIT_LIST_HEAD(list: &e->list); |
5146 | e->type = type; |
5147 | e->flags = QLA_EVT_FLAG_FREE; |
5148 | return e; |
5149 | } |
5150 | |
5151 | int |
5152 | qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) |
5153 | { |
5154 | unsigned long flags; |
5155 | bool q = false; |
5156 | |
5157 | spin_lock_irqsave(&vha->work_lock, flags); |
5158 | list_add_tail(new: &e->list, head: &vha->work_list); |
5159 | |
5160 | if (!test_and_set_bit(IOCB_WORK_ACTIVE, addr: &vha->dpc_flags)) |
5161 | q = true; |
5162 | |
5163 | spin_unlock_irqrestore(lock: &vha->work_lock, flags); |
5164 | |
5165 | if (q) |
5166 | queue_work(wq: vha->hw->wq, work: &vha->iocb_work); |
5167 | |
5168 | return QLA_SUCCESS; |
5169 | } |
5170 | |
5171 | int |
5172 | qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, |
5173 | u32 data) |
5174 | { |
5175 | struct qla_work_evt *e; |
5176 | |
5177 | e = qla2x00_alloc_work(vha, type: QLA_EVT_AEN); |
5178 | if (!e) |
5179 | return QLA_FUNCTION_FAILED; |
5180 | |
5181 | e->u.aen.code = code; |
5182 | e->u.aen.data = data; |
5183 | return qla2x00_post_work(vha, e); |
5184 | } |
5185 | |
5186 | int |
5187 | qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) |
5188 | { |
5189 | struct qla_work_evt *e; |
5190 | |
5191 | e = qla2x00_alloc_work(vha, type: QLA_EVT_IDC_ACK); |
5192 | if (!e) |
5193 | return QLA_FUNCTION_FAILED; |
5194 | |
5195 | memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); |
5196 | return qla2x00_post_work(vha, e); |
5197 | } |
5198 | |
/*
 * qla2x00_post_async_work - template generating the
 * qla2x00_post_async_<name>_work() helpers instantiated below. Each
 * helper queues a work event of the given QLA_EVT_ASYNC_* type carrying
 * the fcport and optional two-word mailbox data, and flags the fcport as
 * having an async operation in flight (FCF_ASYNC_ACTIVE).
 */
#define qla2x00_post_async_work(name, type)	\
int qla2x00_post_async_##name##_work(		\
    struct scsi_qla_host *vha,			\
    fc_port_t *fcport, uint16_t *data)		\
{						\
	struct qla_work_evt *e;			\
						\
	e = qla2x00_alloc_work(vha, type);	\
	if (!e)					\
		return QLA_FUNCTION_FAILED;	\
						\
	e->u.logio.fcport = fcport;		\
	if (data) {				\
		e->u.logio.data[0] = data[0];	\
		e->u.logio.data[1] = data[1];	\
	}					\
	fcport->flags |= FCF_ASYNC_ACTIVE;	\
	return qla2x00_post_work(vha, e);	\
}

/* Instantiate the async login/logout/ADISC/PRLO work-posting helpers. */
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
5224 | |
5225 | int |
5226 | qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) |
5227 | { |
5228 | struct qla_work_evt *e; |
5229 | |
5230 | e = qla2x00_alloc_work(vha, type: QLA_EVT_UEVENT); |
5231 | if (!e) |
5232 | return QLA_FUNCTION_FAILED; |
5233 | |
5234 | e->u.uevent.code = code; |
5235 | return qla2x00_post_work(vha, e); |
5236 | } |
5237 | |
5238 | static void |
5239 | qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) |
5240 | { |
5241 | char event_string[40]; |
5242 | char *envp[] = { event_string, NULL }; |
5243 | |
5244 | switch (code) { |
5245 | case QLA_UEVENT_CODE_FW_DUMP: |
5246 | snprintf(buf: event_string, size: sizeof(event_string), fmt: "FW_DUMP=%lu" , |
5247 | vha->host_no); |
5248 | break; |
5249 | default: |
5250 | /* do nothing */ |
5251 | break; |
5252 | } |
5253 | kobject_uevent_env(kobj: &vha->hw->pdev->dev.kobj, action: KOBJ_CHANGE, envp); |
5254 | } |
5255 | |
5256 | int |
5257 | qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, |
5258 | uint32_t *data, int cnt) |
5259 | { |
5260 | struct qla_work_evt *e; |
5261 | |
5262 | e = qla2x00_alloc_work(vha, type: QLA_EVT_AENFX); |
5263 | if (!e) |
5264 | return QLA_FUNCTION_FAILED; |
5265 | |
5266 | e->u.aenfx.evtcode = evtcode; |
5267 | e->u.aenfx.count = cnt; |
5268 | memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); |
5269 | return qla2x00_post_work(vha, e); |
5270 | } |
5271 | |
5272 | void qla24xx_sched_upd_fcport(fc_port_t *fcport) |
5273 | { |
5274 | unsigned long flags; |
5275 | |
5276 | if (IS_SW_RESV_ADDR(fcport->d_id)) |
5277 | return; |
5278 | |
5279 | spin_lock_irqsave(&fcport->vha->work_lock, flags); |
5280 | if (fcport->disc_state == DSC_UPD_FCPORT) { |
5281 | spin_unlock_irqrestore(lock: &fcport->vha->work_lock, flags); |
5282 | return; |
5283 | } |
5284 | fcport->jiffies_at_registration = jiffies; |
5285 | fcport->sec_since_registration = 0; |
5286 | fcport->next_disc_state = DSC_DELETED; |
5287 | qla2x00_set_fcport_disc_state(fcport, state: DSC_UPD_FCPORT); |
5288 | spin_unlock_irqrestore(lock: &fcport->vha->work_lock, flags); |
5289 | |
5290 | queue_work(wq: system_unbound_wq, work: &fcport->reg_work); |
5291 | } |
5292 | |
5293 | static |
5294 | void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) |
5295 | { |
5296 | unsigned long flags; |
5297 | fc_port_t *fcport = NULL, *tfcp; |
5298 | struct qlt_plogi_ack_t *pla = |
5299 | (struct qlt_plogi_ack_t *)e->u.new_sess.pla; |
5300 | uint8_t free_fcport = 0; |
5301 | |
5302 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
5303 | fmt: "%s %d %8phC enter\n" , |
5304 | __func__, __LINE__, e->u.new_sess.port_name); |
5305 | |
5306 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
5307 | fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); |
5308 | if (fcport) { |
5309 | fcport->d_id = e->u.new_sess.id; |
5310 | if (pla) { |
5311 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
5312 | memcpy(fcport->node_name, |
5313 | pla->iocb.u.isp24.u.plogi.node_name, |
5314 | WWN_SIZE); |
5315 | qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); |
5316 | /* we took an extra ref_count to prevent PLOGI ACK when |
5317 | * fcport/sess has not been created. |
5318 | */ |
5319 | pla->ref_count--; |
5320 | } |
5321 | } else { |
5322 | spin_unlock_irqrestore(lock: &vha->hw->tgt.sess_lock, flags); |
5323 | fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); |
5324 | if (fcport) { |
5325 | fcport->d_id = e->u.new_sess.id; |
5326 | fcport->flags |= FCF_FABRIC_DEVICE; |
5327 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
5328 | fcport->tgt_short_link_down_cnt = 0; |
5329 | |
5330 | memcpy(fcport->port_name, e->u.new_sess.port_name, |
5331 | WWN_SIZE); |
5332 | |
5333 | fcport->fc4_type = e->u.new_sess.fc4_type; |
5334 | if (NVME_PRIORITY(vha->hw, fcport)) |
5335 | fcport->do_prli_nvme = 1; |
5336 | else |
5337 | fcport->do_prli_nvme = 0; |
5338 | |
5339 | if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { |
5340 | fcport->dm_login_expire = jiffies + |
5341 | QLA_N2N_WAIT_TIME * HZ; |
5342 | fcport->fc4_type = FS_FC4TYPE_FCP; |
5343 | fcport->n2n_flag = 1; |
5344 | if (vha->flags.nvme_enabled) |
5345 | fcport->fc4_type |= FS_FC4TYPE_NVME; |
5346 | } |
5347 | |
5348 | } else { |
5349 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
5350 | fmt: "%s %8phC mem alloc fail.\n" , |
5351 | __func__, e->u.new_sess.port_name); |
5352 | |
5353 | if (pla) { |
5354 | list_del(entry: &pla->list); |
5355 | kmem_cache_free(s: qla_tgt_plogi_cachep, objp: pla); |
5356 | } |
5357 | return; |
5358 | } |
5359 | |
5360 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
5361 | /* search again to make sure no one else got ahead */ |
5362 | tfcp = qla2x00_find_fcport_by_wwpn(vha, |
5363 | e->u.new_sess.port_name, 1); |
5364 | if (tfcp) { |
5365 | /* should rarily happen */ |
5366 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
5367 | fmt: "%s %8phC found existing fcport b4 add. DS %d LS %d\n" , |
5368 | __func__, tfcp->port_name, tfcp->disc_state, |
5369 | tfcp->fw_login_state); |
5370 | |
5371 | free_fcport = 1; |
5372 | } else { |
5373 | list_add_tail(new: &fcport->list, head: &vha->vp_fcports); |
5374 | |
5375 | } |
5376 | if (pla) { |
5377 | qlt_plogi_ack_link(vha, pla, fcport, |
5378 | QLT_PLOGI_LINK_SAME_WWN); |
5379 | pla->ref_count--; |
5380 | } |
5381 | } |
5382 | spin_unlock_irqrestore(lock: &vha->hw->tgt.sess_lock, flags); |
5383 | |
5384 | if (fcport) { |
5385 | fcport->id_changed = 1; |
5386 | fcport->scan_state = QLA_FCPORT_FOUND; |
5387 | fcport->chip_reset = vha->hw->base_qpair->chip_reset; |
5388 | memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); |
5389 | |
5390 | if (pla) { |
5391 | if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { |
5392 | u16 wd3_lo; |
5393 | |
5394 | fcport->fw_login_state = DSC_LS_PRLI_PEND; |
5395 | fcport->local = 0; |
5396 | fcport->loop_id = |
5397 | le16_to_cpu( |
5398 | pla->iocb.u.isp24.nport_handle); |
5399 | fcport->fw_login_state = DSC_LS_PRLI_PEND; |
5400 | wd3_lo = |
5401 | le16_to_cpu( |
5402 | pla->iocb.u.isp24.u.prli.wd3_lo); |
5403 | |
5404 | if (wd3_lo & BIT_7) |
5405 | fcport->conf_compl_supported = 1; |
5406 | |
5407 | if ((wd3_lo & BIT_4) == 0) |
5408 | fcport->port_type = FCT_INITIATOR; |
5409 | else |
5410 | fcport->port_type = FCT_TARGET; |
5411 | } |
5412 | qlt_plogi_ack_unref(vha, pla); |
5413 | } else { |
5414 | fc_port_t *dfcp = NULL; |
5415 | |
5416 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
5417 | tfcp = qla2x00_find_fcport_by_nportid(vha, |
5418 | &e->u.new_sess.id, 1); |
5419 | if (tfcp && (tfcp != fcport)) { |
5420 | /* |
5421 | * We have a conflict fcport with same NportID. |
5422 | */ |
5423 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
5424 | fmt: "%s %8phC found conflict b4 add. DS %d LS %d\n" , |
5425 | __func__, tfcp->port_name, tfcp->disc_state, |
5426 | tfcp->fw_login_state); |
5427 | |
5428 | switch (tfcp->disc_state) { |
5429 | case DSC_DELETED: |
5430 | break; |
5431 | case DSC_DELETE_PEND: |
5432 | fcport->login_pause = 1; |
5433 | tfcp->conflict = fcport; |
5434 | break; |
5435 | default: |
5436 | fcport->login_pause = 1; |
5437 | tfcp->conflict = fcport; |
5438 | dfcp = tfcp; |
5439 | break; |
5440 | } |
5441 | } |
5442 | spin_unlock_irqrestore(lock: &vha->hw->tgt.sess_lock, flags); |
5443 | if (dfcp) |
5444 | qlt_schedule_sess_for_deletion(tfcp); |
5445 | |
5446 | if (N2N_TOPO(vha->hw)) { |
5447 | fcport->flags &= ~FCF_FABRIC_DEVICE; |
5448 | fcport->keep_nport_handle = 1; |
5449 | if (vha->flags.nvme_enabled) { |
5450 | fcport->fc4_type = |
5451 | (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); |
5452 | fcport->n2n_flag = 1; |
5453 | } |
5454 | fcport->fw_login_state = 0; |
5455 | |
5456 | schedule_delayed_work(dwork: &vha->scan.scan_work, delay: 5); |
5457 | } else { |
5458 | qla24xx_fcport_handle_login(vha, fcport); |
5459 | } |
5460 | } |
5461 | } |
5462 | |
5463 | if (free_fcport) { |
5464 | qla2x00_free_fcport(fcport); |
5465 | if (pla) { |
5466 | list_del(entry: &pla->list); |
5467 | kmem_cache_free(s: qla_tgt_plogi_cachep, objp: pla); |
5468 | } |
5469 | } |
5470 | } |
5471 | |
5472 | static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) |
5473 | { |
5474 | struct srb *sp = e->u.iosb.sp; |
5475 | int rval; |
5476 | |
5477 | rval = qla2x00_start_sp(sp); |
5478 | if (rval != QLA_SUCCESS) { |
5479 | ql_dbg(ql_dbg_disc, vha, 0x2043, |
5480 | fmt: "%s: %s: Re-issue IOCB failed (%d).\n" , |
5481 | __func__, sp->name, rval); |
5482 | qla24xx_sp_unmap(vha, sp); |
5483 | } |
5484 | } |
5485 | |
5486 | void |
5487 | qla2x00_do_work(struct scsi_qla_host *vha) |
5488 | { |
5489 | struct qla_work_evt *e, *tmp; |
5490 | unsigned long flags; |
5491 | LIST_HEAD(work); |
5492 | int rc; |
5493 | |
5494 | spin_lock_irqsave(&vha->work_lock, flags); |
5495 | list_splice_init(list: &vha->work_list, head: &work); |
5496 | spin_unlock_irqrestore(lock: &vha->work_lock, flags); |
5497 | |
5498 | list_for_each_entry_safe(e, tmp, &work, list) { |
5499 | rc = QLA_SUCCESS; |
5500 | switch (e->type) { |
5501 | case QLA_EVT_AEN: |
5502 | fc_host_post_event(shost: vha->host, event_number: fc_get_event_number(), |
5503 | event_code: e->u.aen.code, event_data: e->u.aen.data); |
5504 | break; |
5505 | case QLA_EVT_IDC_ACK: |
5506 | qla81xx_idc_ack(vha, e->u.idc_ack.mb); |
5507 | break; |
5508 | case QLA_EVT_ASYNC_LOGIN: |
5509 | qla2x00_async_login(vha, e->u.logio.fcport, |
5510 | e->u.logio.data); |
5511 | break; |
5512 | case QLA_EVT_ASYNC_LOGOUT: |
5513 | rc = qla2x00_async_logout(vha, e->u.logio.fcport); |
5514 | break; |
5515 | case QLA_EVT_ASYNC_ADISC: |
5516 | qla2x00_async_adisc(vha, e->u.logio.fcport, |
5517 | e->u.logio.data); |
5518 | break; |
5519 | case QLA_EVT_UEVENT: |
5520 | qla2x00_uevent_emit(vha, code: e->u.uevent.code); |
5521 | break; |
5522 | case QLA_EVT_AENFX: |
5523 | qlafx00_process_aen(vha, e); |
5524 | break; |
5525 | case QLA_EVT_UNMAP: |
5526 | qla24xx_sp_unmap(vha, e->u.iosb.sp); |
5527 | break; |
5528 | case QLA_EVT_RELOGIN: |
5529 | qla2x00_relogin(vha); |
5530 | break; |
5531 | case QLA_EVT_NEW_SESS: |
5532 | qla24xx_create_new_sess(vha, e); |
5533 | break; |
5534 | case QLA_EVT_GPDB: |
5535 | qla24xx_async_gpdb(vha, e->u.fcport.fcport, |
5536 | e->u.fcport.opt); |
5537 | break; |
5538 | case QLA_EVT_PRLI: |
5539 | qla24xx_async_prli(vha, e->u.fcport.fcport); |
5540 | break; |
5541 | case QLA_EVT_GPSC: |
5542 | qla24xx_async_gpsc(vha, e->u.fcport.fcport); |
5543 | break; |
5544 | case QLA_EVT_GNL: |
5545 | qla24xx_async_gnl(vha, e->u.fcport.fcport); |
5546 | break; |
5547 | case QLA_EVT_NACK: |
5548 | qla24xx_do_nack_work(vha, e); |
5549 | break; |
5550 | case QLA_EVT_ASYNC_PRLO: |
5551 | rc = qla2x00_async_prlo(vha, e->u.logio.fcport); |
5552 | break; |
5553 | case QLA_EVT_ASYNC_PRLO_DONE: |
5554 | qla2x00_async_prlo_done(vha, e->u.logio.fcport, |
5555 | e->u.logio.data); |
5556 | break; |
5557 | case QLA_EVT_GPNFT: |
5558 | qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, |
5559 | e->u.gpnft.sp); |
5560 | break; |
5561 | case QLA_EVT_GPNFT_DONE: |
5562 | qla24xx_async_gpnft_done(vha, e->u.iosb.sp); |
5563 | break; |
5564 | case QLA_EVT_GNNFT_DONE: |
5565 | qla24xx_async_gnnft_done(vha, e->u.iosb.sp); |
5566 | break; |
5567 | case QLA_EVT_GFPNID: |
5568 | qla24xx_async_gfpnid(vha, e->u.fcport.fcport); |
5569 | break; |
5570 | case QLA_EVT_SP_RETRY: |
5571 | qla_sp_retry(vha, e); |
5572 | break; |
5573 | case QLA_EVT_IIDMA: |
5574 | qla_do_iidma_work(vha, fcport: e->u.fcport.fcport); |
5575 | break; |
5576 | case QLA_EVT_ELS_PLOGI: |
5577 | qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, |
5578 | e->u.fcport.fcport, false); |
5579 | break; |
5580 | case QLA_EVT_SA_REPLACE: |
5581 | rc = qla24xx_issue_sa_replace_iocb(vha, e); |
5582 | break; |
5583 | } |
5584 | |
5585 | if (rc == EAGAIN) { |
5586 | /* put 'work' at head of 'vha->work_list' */ |
5587 | spin_lock_irqsave(&vha->work_lock, flags); |
5588 | list_splice(list: &work, head: &vha->work_list); |
5589 | spin_unlock_irqrestore(lock: &vha->work_lock, flags); |
5590 | break; |
5591 | } |
5592 | list_del_init(entry: &e->list); |
5593 | if (e->flags & QLA_EVT_FLAG_FREE) |
5594 | kfree(objp: e); |
5595 | |
5596 | /* For each work completed decrement vha ref count */ |
5597 | QLA_VHA_MARK_NOT_BUSY(vha); |
5598 | } |
5599 | } |
5600 | |
5601 | int qla24xx_post_relogin_work(struct scsi_qla_host *vha) |
5602 | { |
5603 | struct qla_work_evt *e; |
5604 | |
5605 | e = qla2x00_alloc_work(vha, type: QLA_EVT_RELOGIN); |
5606 | |
5607 | if (!e) { |
5608 | set_bit(RELOGIN_NEEDED, addr: &vha->dpc_flags); |
5609 | return QLA_FUNCTION_FAILED; |
5610 | } |
5611 | |
5612 | return qla2x00_post_work(vha, e); |
5613 | } |
5614 | |
5615 | /* Relogins all the fcports of a vport |
5616 | * Context: dpc thread |
5617 | */ |
5618 | void qla2x00_relogin(struct scsi_qla_host *vha) |
5619 | { |
5620 | fc_port_t *fcport; |
5621 | int status, relogin_needed = 0; |
5622 | struct event_arg ea; |
5623 | |
5624 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
5625 | /* |
5626 | * If the port is not ONLINE then try to login |
5627 | * to it if we haven't run out of retries. |
5628 | */ |
5629 | if (atomic_read(v: &fcport->state) != FCS_ONLINE && |
5630 | fcport->login_retry) { |
5631 | if (fcport->scan_state != QLA_FCPORT_FOUND || |
5632 | fcport->disc_state == DSC_LOGIN_AUTH_PEND || |
5633 | fcport->disc_state == DSC_LOGIN_COMPLETE) |
5634 | continue; |
5635 | |
5636 | if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || |
5637 | fcport->disc_state == DSC_DELETE_PEND) { |
5638 | relogin_needed = 1; |
5639 | } else { |
5640 | if (vha->hw->current_topology != ISP_CFG_NL) { |
5641 | memset(&ea, 0, sizeof(ea)); |
5642 | ea.fcport = fcport; |
5643 | qla24xx_handle_relogin_event(vha, ea: &ea); |
5644 | } else if (vha->hw->current_topology == |
5645 | ISP_CFG_NL && |
5646 | IS_QLA2XXX_MIDTYPE(vha->hw)) { |
5647 | (void)qla24xx_fcport_handle_login(vha, |
5648 | fcport); |
5649 | } else if (vha->hw->current_topology == |
5650 | ISP_CFG_NL) { |
5651 | fcport->login_retry--; |
5652 | status = |
5653 | qla2x00_local_device_login(vha, |
5654 | fcport); |
5655 | if (status == QLA_SUCCESS) { |
5656 | fcport->old_loop_id = |
5657 | fcport->loop_id; |
5658 | ql_dbg(ql_dbg_disc, vha, 0x2003, |
5659 | fmt: "Port login OK: logged in ID 0x%x.\n" , |
5660 | fcport->loop_id); |
5661 | qla2x00_update_fcport |
5662 | (vha, fcport); |
5663 | } else if (status == 1) { |
5664 | set_bit(RELOGIN_NEEDED, |
5665 | addr: &vha->dpc_flags); |
5666 | /* retry the login again */ |
5667 | ql_dbg(ql_dbg_disc, vha, 0x2007, |
5668 | fmt: "Retrying %d login again loop_id 0x%x.\n" , |
5669 | fcport->login_retry, |
5670 | fcport->loop_id); |
5671 | } else { |
5672 | fcport->login_retry = 0; |
5673 | } |
5674 | |
5675 | if (fcport->login_retry == 0 && |
5676 | status != QLA_SUCCESS) |
5677 | qla2x00_clear_loop_id(fcport); |
5678 | } |
5679 | } |
5680 | } |
5681 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
5682 | break; |
5683 | } |
5684 | |
5685 | if (relogin_needed) |
5686 | set_bit(RELOGIN_NEEDED, addr: &vha->dpc_flags); |
5687 | |
5688 | ql_dbg(ql_dbg_disc, vha, 0x400e, |
5689 | fmt: "Relogin end.\n" ); |
5690 | } |
5691 | |
5692 | /* Schedule work on any of the dpc-workqueues */ |
5693 | void |
5694 | qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) |
5695 | { |
5696 | struct qla_hw_data *ha = base_vha->hw; |
5697 | |
5698 | switch (work_code) { |
5699 | case MBA_IDC_AEN: /* 0x8200 */ |
5700 | if (ha->dpc_lp_wq) |
5701 | queue_work(wq: ha->dpc_lp_wq, work: &ha->idc_aen); |
5702 | break; |
5703 | |
5704 | case QLA83XX_NIC_CORE_RESET: /* 0x1 */ |
5705 | if (!ha->flags.nic_core_reset_hdlr_active) { |
5706 | if (ha->dpc_hp_wq) |
5707 | queue_work(wq: ha->dpc_hp_wq, work: &ha->nic_core_reset); |
5708 | } else |
5709 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb05e, |
5710 | fmt: "NIC Core reset is already active. Skip " |
5711 | "scheduling it again.\n" ); |
5712 | break; |
5713 | case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ |
5714 | if (ha->dpc_hp_wq) |
5715 | queue_work(wq: ha->dpc_hp_wq, work: &ha->idc_state_handler); |
5716 | break; |
5717 | case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ |
5718 | if (ha->dpc_hp_wq) |
5719 | queue_work(wq: ha->dpc_hp_wq, work: &ha->nic_core_unrecoverable); |
5720 | break; |
5721 | default: |
5722 | ql_log(ql_log_warn, vha: base_vha, 0xb05f, |
5723 | fmt: "Unknown work-code=0x%x.\n" , work_code); |
5724 | } |
5725 | |
5726 | return; |
5727 | } |
5728 | |
5729 | /* Work: Perform NIC Core Unrecoverable state handling */ |
5730 | void |
5731 | qla83xx_nic_core_unrecoverable_work(struct work_struct *work) |
5732 | { |
5733 | struct qla_hw_data *ha = |
5734 | container_of(work, struct qla_hw_data, nic_core_unrecoverable); |
5735 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
5736 | uint32_t dev_state = 0; |
5737 | |
5738 | qla83xx_idc_lock(base_vha, 0); |
5739 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); |
5740 | qla83xx_reset_ownership(base_vha); |
5741 | if (ha->flags.nic_core_reset_owner) { |
5742 | ha->flags.nic_core_reset_owner = 0; |
5743 | qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, |
5744 | QLA8XXX_DEV_FAILED); |
5745 | ql_log(ql_log_info, vha: base_vha, 0xb060, fmt: "HW State: FAILED.\n" ); |
5746 | qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); |
5747 | } |
5748 | qla83xx_idc_unlock(base_vha, 0); |
5749 | } |
5750 | |
5751 | /* Work: Execute IDC state handler */ |
5752 | void |
5753 | qla83xx_idc_state_handler_work(struct work_struct *work) |
5754 | { |
5755 | struct qla_hw_data *ha = |
5756 | container_of(work, struct qla_hw_data, idc_state_handler); |
5757 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
5758 | uint32_t dev_state = 0; |
5759 | |
5760 | qla83xx_idc_lock(base_vha, 0); |
5761 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); |
5762 | if (dev_state == QLA8XXX_DEV_FAILED || |
5763 | dev_state == QLA8XXX_DEV_NEED_QUIESCENT) |
5764 | qla83xx_idc_state_handler(base_vha); |
5765 | qla83xx_idc_unlock(base_vha, 0); |
5766 | } |
5767 | |
5768 | static int |
5769 | qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) |
5770 | { |
5771 | int rval = QLA_SUCCESS; |
5772 | unsigned long heart_beat_wait = jiffies + (1 * HZ); |
5773 | uint32_t heart_beat_counter1, heart_beat_counter2; |
5774 | |
5775 | do { |
5776 | if (time_after(jiffies, heart_beat_wait)) { |
5777 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb07c, |
5778 | fmt: "Nic Core f/w is not alive.\n" ); |
5779 | rval = QLA_FUNCTION_FAILED; |
5780 | break; |
5781 | } |
5782 | |
5783 | qla83xx_idc_lock(base_vha, 0); |
5784 | qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, |
5785 | &heart_beat_counter1); |
5786 | qla83xx_idc_unlock(base_vha, 0); |
5787 | msleep(msecs: 100); |
5788 | qla83xx_idc_lock(base_vha, 0); |
5789 | qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, |
5790 | &heart_beat_counter2); |
5791 | qla83xx_idc_unlock(base_vha, 0); |
5792 | } while (heart_beat_counter1 == heart_beat_counter2); |
5793 | |
5794 | return rval; |
5795 | } |
5796 | |
5797 | /* Work: Perform NIC Core Reset handling */ |
5798 | void |
5799 | qla83xx_nic_core_reset_work(struct work_struct *work) |
5800 | { |
5801 | struct qla_hw_data *ha = |
5802 | container_of(work, struct qla_hw_data, nic_core_reset); |
5803 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
5804 | uint32_t dev_state = 0; |
5805 | |
5806 | if (IS_QLA2031(ha)) { |
5807 | if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) |
5808 | ql_log(ql_log_warn, vha: base_vha, 0xb081, |
5809 | fmt: "Failed to dump mctp\n" ); |
5810 | return; |
5811 | } |
5812 | |
5813 | if (!ha->flags.nic_core_reset_hdlr_active) { |
5814 | if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { |
5815 | qla83xx_idc_lock(base_vha, 0); |
5816 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, |
5817 | &dev_state); |
5818 | qla83xx_idc_unlock(base_vha, 0); |
5819 | if (dev_state != QLA8XXX_DEV_NEED_RESET) { |
5820 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb07a, |
5821 | fmt: "Nic Core f/w is alive.\n" ); |
5822 | return; |
5823 | } |
5824 | } |
5825 | |
5826 | ha->flags.nic_core_reset_hdlr_active = 1; |
5827 | if (qla83xx_nic_core_reset(base_vha)) { |
5828 | /* NIC Core reset failed. */ |
5829 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb061, |
5830 | fmt: "NIC Core reset failed.\n" ); |
5831 | } |
5832 | ha->flags.nic_core_reset_hdlr_active = 0; |
5833 | } |
5834 | } |
5835 | |
5836 | /* Work: Handle 8200 IDC aens */ |
5837 | void |
5838 | qla83xx_service_idc_aen(struct work_struct *work) |
5839 | { |
5840 | struct qla_hw_data *ha = |
5841 | container_of(work, struct qla_hw_data, idc_aen); |
5842 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
5843 | uint32_t dev_state, idc_control; |
5844 | |
5845 | qla83xx_idc_lock(base_vha, 0); |
5846 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); |
5847 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); |
5848 | qla83xx_idc_unlock(base_vha, 0); |
5849 | if (dev_state == QLA8XXX_DEV_NEED_RESET) { |
5850 | if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { |
5851 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb062, |
5852 | fmt: "Application requested NIC Core Reset.\n" ); |
5853 | qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); |
5854 | } else if (qla83xx_check_nic_core_fw_alive(base_vha) == |
5855 | QLA_SUCCESS) { |
5856 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb07b, |
5857 | fmt: "Other protocol driver requested NIC Core Reset.\n" ); |
5858 | qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); |
5859 | } |
5860 | } else if (dev_state == QLA8XXX_DEV_FAILED || |
5861 | dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { |
5862 | qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); |
5863 | } |
5864 | } |
5865 | |
5866 | /* |
5867 | * Control the frequency of IDC lock retries |
5868 | */ |
5869 | #define QLA83XX_WAIT_LOGIC_MS 100 |
5870 | |
5871 | static int |
5872 | qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) |
5873 | { |
5874 | int rval; |
5875 | uint32_t data; |
5876 | uint32_t idc_lck_rcvry_stage_mask = 0x3; |
5877 | uint32_t idc_lck_rcvry_owner_mask = 0x3c; |
5878 | struct qla_hw_data *ha = base_vha->hw; |
5879 | |
5880 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb086, |
5881 | fmt: "Trying force recovery of the IDC lock.\n" ); |
5882 | |
5883 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); |
5884 | if (rval) |
5885 | return rval; |
5886 | |
5887 | if ((data & idc_lck_rcvry_stage_mask) > 0) { |
5888 | return QLA_SUCCESS; |
5889 | } else { |
5890 | data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); |
5891 | rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, |
5892 | data); |
5893 | if (rval) |
5894 | return rval; |
5895 | |
5896 | msleep(msecs: 200); |
5897 | |
5898 | rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, |
5899 | &data); |
5900 | if (rval) |
5901 | return rval; |
5902 | |
5903 | if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { |
5904 | data &= (IDC_LOCK_RECOVERY_STAGE2 | |
5905 | ~(idc_lck_rcvry_stage_mask)); |
5906 | rval = qla83xx_wr_reg(base_vha, |
5907 | QLA83XX_IDC_LOCK_RECOVERY, data); |
5908 | if (rval) |
5909 | return rval; |
5910 | |
5911 | /* Forcefully perform IDC UnLock */ |
5912 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, |
5913 | &data); |
5914 | if (rval) |
5915 | return rval; |
5916 | /* Clear lock-id by setting 0xff */ |
5917 | rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, |
5918 | 0xff); |
5919 | if (rval) |
5920 | return rval; |
5921 | /* Clear lock-recovery by setting 0x0 */ |
5922 | rval = qla83xx_wr_reg(base_vha, |
5923 | QLA83XX_IDC_LOCK_RECOVERY, 0x0); |
5924 | if (rval) |
5925 | return rval; |
5926 | } else |
5927 | return QLA_SUCCESS; |
5928 | } |
5929 | |
5930 | return rval; |
5931 | } |
5932 | |
5933 | static int |
5934 | qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) |
5935 | { |
5936 | int rval = QLA_SUCCESS; |
5937 | uint32_t o_drv_lockid, n_drv_lockid; |
5938 | unsigned long lock_recovery_timeout; |
5939 | |
5940 | lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; |
5941 | retry_lockid: |
5942 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); |
5943 | if (rval) |
5944 | goto exit; |
5945 | |
5946 | /* MAX wait time before forcing IDC Lock recovery = 2 secs */ |
5947 | if (time_after_eq(jiffies, lock_recovery_timeout)) { |
5948 | if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) |
5949 | return QLA_SUCCESS; |
5950 | else |
5951 | return QLA_FUNCTION_FAILED; |
5952 | } |
5953 | |
5954 | rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); |
5955 | if (rval) |
5956 | goto exit; |
5957 | |
5958 | if (o_drv_lockid == n_drv_lockid) { |
5959 | msleep(QLA83XX_WAIT_LOGIC_MS); |
5960 | goto retry_lockid; |
5961 | } else |
5962 | return QLA_SUCCESS; |
5963 | |
5964 | exit: |
5965 | return rval; |
5966 | } |
5967 | |
5968 | /* |
5969 | * Context: task, can sleep |
5970 | */ |
5971 | void |
5972 | qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) |
5973 | { |
5974 | uint32_t data; |
5975 | uint32_t lock_owner; |
5976 | struct qla_hw_data *ha = base_vha->hw; |
5977 | |
5978 | might_sleep(); |
5979 | |
5980 | /* IDC-lock implementation using driver-lock/lock-id remote registers */ |
5981 | retry_lock: |
5982 | if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) |
5983 | == QLA_SUCCESS) { |
5984 | if (data) { |
5985 | /* Setting lock-id to our function-number */ |
5986 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, |
5987 | ha->portnum); |
5988 | } else { |
5989 | qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, |
5990 | &lock_owner); |
5991 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb063, |
5992 | fmt: "Failed to acquire IDC lock, acquired by %d, " |
5993 | "retrying...\n" , lock_owner); |
5994 | |
5995 | /* Retry/Perform IDC-Lock recovery */ |
5996 | if (qla83xx_idc_lock_recovery(base_vha) |
5997 | == QLA_SUCCESS) { |
5998 | msleep(QLA83XX_WAIT_LOGIC_MS); |
5999 | goto retry_lock; |
6000 | } else |
6001 | ql_log(ql_log_warn, vha: base_vha, 0xb075, |
6002 | fmt: "IDC Lock recovery FAILED.\n" ); |
6003 | } |
6004 | |
6005 | } |
6006 | |
6007 | return; |
6008 | } |
6009 | |
6010 | static bool |
6011 | qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, |
6012 | struct purex_entry_24xx *purex) |
6013 | { |
6014 | char fwstr[16]; |
6015 | u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; |
6016 | struct port_database_24xx *pdb; |
6017 | |
6018 | /* Domain Controller is always logged-out. */ |
6019 | /* if RDP request is not from Domain Controller: */ |
6020 | if (sid != 0xfffc01) |
6021 | return false; |
6022 | |
6023 | ql_dbg(ql_dbg_init, vha, 0x0181, fmt: "%s: s_id=%#x\n" , __func__, sid); |
6024 | |
6025 | pdb = kzalloc(size: sizeof(*pdb), GFP_KERNEL); |
6026 | if (!pdb) { |
6027 | ql_dbg(ql_dbg_init, vha, 0x0181, |
6028 | fmt: "%s: Failed allocate pdb\n" , __func__); |
6029 | } else if (qla24xx_get_port_database(vha, |
6030 | le16_to_cpu(purex->nport_handle), pdb)) { |
6031 | ql_dbg(ql_dbg_init, vha, 0x0181, |
6032 | fmt: "%s: Failed get pdb sid=%x\n" , __func__, sid); |
6033 | } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && |
6034 | pdb->current_login_state != PDS_PRLI_COMPLETE) { |
6035 | ql_dbg(ql_dbg_init, vha, 0x0181, |
6036 | fmt: "%s: Port not logged in sid=%#x\n" , __func__, sid); |
6037 | } else { |
6038 | /* RDP request is from logged in port */ |
6039 | kfree(objp: pdb); |
6040 | return false; |
6041 | } |
6042 | kfree(objp: pdb); |
6043 | |
6044 | vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); |
6045 | fwstr[strcspn(fwstr, " " )] = 0; |
6046 | /* if FW version allows RDP response length upto 2048 bytes: */ |
6047 | if (strcmp(fwstr, "8.09.00" ) > 0 || strcmp(fwstr, "8.05.65" ) == 0) |
6048 | return false; |
6049 | |
6050 | ql_dbg(ql_dbg_init, vha, 0x0181, fmt: "%s: fw=%s\n" , __func__, fwstr); |
6051 | |
6052 | /* RDP response length is to be reduced to maximum 256 bytes */ |
6053 | return true; |
6054 | } |
6055 | |
6056 | /* |
6057 | * Function Name: qla24xx_process_purex_iocb |
6058 | * |
6059 | * Description: |
6060 | * Prepare a RDP response and send to Fabric switch |
6061 | * |
6062 | * PARAMETERS: |
6063 | * vha: SCSI qla host |
6064 | * purex: RDP request received by HBA |
6065 | */ |
6066 | void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, |
6067 | struct purex_item *item) |
6068 | { |
6069 | struct qla_hw_data *ha = vha->hw; |
6070 | struct purex_entry_24xx *purex = |
6071 | (struct purex_entry_24xx *)&item->iocb; |
6072 | dma_addr_t rsp_els_dma; |
6073 | dma_addr_t rsp_payload_dma; |
6074 | dma_addr_t stat_dma; |
6075 | dma_addr_t sfp_dma; |
6076 | struct els_entry_24xx *rsp_els = NULL; |
6077 | struct rdp_rsp_payload *rsp_payload = NULL; |
6078 | struct link_statistics *stat = NULL; |
6079 | uint8_t *sfp = NULL; |
6080 | uint16_t sfp_flags = 0; |
6081 | uint rsp_payload_length = sizeof(*rsp_payload); |
6082 | int rval; |
6083 | |
6084 | ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, |
6085 | fmt: "%s: Enter\n" , __func__); |
6086 | |
6087 | ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, |
6088 | fmt: "-------- ELS REQ -------\n" ); |
6089 | ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, |
6090 | purex, sizeof(*purex)); |
6091 | |
6092 | if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { |
6093 | rsp_payload_length = |
6094 | offsetof(typeof(*rsp_payload), optical_elmt_desc); |
6095 | ql_dbg(ql_dbg_init, vha, 0x0181, |
6096 | fmt: "Reducing RSP payload length to %u bytes...\n" , |
6097 | rsp_payload_length); |
6098 | } |
6099 | |
6100 | rsp_els = dma_alloc_coherent(dev: &ha->pdev->dev, size: sizeof(*rsp_els), |
6101 | dma_handle: &rsp_els_dma, GFP_KERNEL); |
6102 | if (!rsp_els) { |
6103 | ql_log(ql_log_warn, vha, 0x0183, |
6104 | fmt: "Failed allocate dma buffer ELS RSP.\n" ); |
6105 | goto dealloc; |
6106 | } |
6107 | |
6108 | rsp_payload = dma_alloc_coherent(dev: &ha->pdev->dev, size: sizeof(*rsp_payload), |
6109 | dma_handle: &rsp_payload_dma, GFP_KERNEL); |
6110 | if (!rsp_payload) { |
6111 | ql_log(ql_log_warn, vha, 0x0184, |
6112 | fmt: "Failed allocate dma buffer ELS RSP payload.\n" ); |
6113 | goto dealloc; |
6114 | } |
6115 | |
6116 | sfp = dma_alloc_coherent(dev: &ha->pdev->dev, SFP_RTDI_LEN, |
6117 | dma_handle: &sfp_dma, GFP_KERNEL); |
6118 | |
6119 | stat = dma_alloc_coherent(dev: &ha->pdev->dev, size: sizeof(*stat), |
6120 | dma_handle: &stat_dma, GFP_KERNEL); |
6121 | |
6122 | /* Prepare Response IOCB */ |
6123 | rsp_els->entry_type = ELS_IOCB_TYPE; |
6124 | rsp_els->entry_count = 1; |
6125 | rsp_els->sys_define = 0; |
6126 | rsp_els->entry_status = 0; |
6127 | rsp_els->handle = 0; |
6128 | rsp_els->nport_handle = purex->nport_handle; |
6129 | rsp_els->tx_dsd_count = cpu_to_le16(1); |
6130 | rsp_els->vp_index = purex->vp_idx; |
6131 | rsp_els->sof_type = EST_SOFI3; |
6132 | rsp_els->rx_xchg_address = purex->rx_xchg_addr; |
6133 | rsp_els->rx_dsd_count = 0; |
6134 | rsp_els->opcode = purex->els_frame_payload[0]; |
6135 | |
6136 | rsp_els->d_id[0] = purex->s_id[0]; |
6137 | rsp_els->d_id[1] = purex->s_id[1]; |
6138 | rsp_els->d_id[2] = purex->s_id[2]; |
6139 | |
6140 | rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); |
6141 | rsp_els->rx_byte_count = 0; |
6142 | rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); |
6143 | |
6144 | put_unaligned_le64(val: rsp_payload_dma, p: &rsp_els->tx_address); |
6145 | rsp_els->tx_len = rsp_els->tx_byte_count; |
6146 | |
6147 | rsp_els->rx_address = 0; |
6148 | rsp_els->rx_len = 0; |
6149 | |
6150 | /* Prepare Response Payload */ |
6151 | rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ |
6152 | rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - |
6153 | sizeof(rsp_payload->hdr)); |
6154 | |
6155 | /* Link service Request Info Descriptor */ |
6156 | rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); |
6157 | rsp_payload->ls_req_info_desc.desc_len = |
6158 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); |
6159 | rsp_payload->ls_req_info_desc.req_payload_word_0 = |
6160 | cpu_to_be32p(p: (uint32_t *)purex->els_frame_payload); |
6161 | |
6162 | /* Link service Request Info Descriptor 2 */ |
6163 | rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); |
6164 | rsp_payload->ls_req_info_desc2.desc_len = |
6165 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); |
6166 | rsp_payload->ls_req_info_desc2.req_payload_word_0 = |
6167 | cpu_to_be32p(p: (uint32_t *)purex->els_frame_payload); |
6168 | |
6169 | |
6170 | rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); |
6171 | rsp_payload->sfp_diag_desc.desc_len = |
6172 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); |
6173 | |
6174 | if (sfp) { |
6175 | /* SFP Flags */ |
6176 | memset(sfp, 0, SFP_RTDI_LEN); |
6177 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); |
6178 | if (!rval) { |
6179 | /* SFP Flags bits 3-0: Port Tx Laser Type */ |
6180 | if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) |
6181 | sfp_flags |= BIT_0; /* short wave */ |
6182 | else if (sfp[0] & BIT_1) |
6183 | sfp_flags |= BIT_1; /* long wave 1310nm */ |
6184 | else if (sfp[1] & BIT_4) |
6185 | sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ |
6186 | } |
6187 | |
6188 | /* SFP Type */ |
6189 | memset(sfp, 0, SFP_RTDI_LEN); |
6190 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); |
6191 | if (!rval) { |
6192 | sfp_flags |= BIT_4; /* optical */ |
6193 | if (sfp[0] == 0x3) |
6194 | sfp_flags |= BIT_6; /* sfp+ */ |
6195 | } |
6196 | |
6197 | rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); |
6198 | |
6199 | /* SFP Diagnostics */ |
6200 | memset(sfp, 0, SFP_RTDI_LEN); |
6201 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); |
6202 | if (!rval) { |
6203 | __be16 *trx = (__force __be16 *)sfp; /* already be16 */ |
6204 | rsp_payload->sfp_diag_desc.temperature = trx[0]; |
6205 | rsp_payload->sfp_diag_desc.vcc = trx[1]; |
6206 | rsp_payload->sfp_diag_desc.tx_bias = trx[2]; |
6207 | rsp_payload->sfp_diag_desc.tx_power = trx[3]; |
6208 | rsp_payload->sfp_diag_desc.rx_power = trx[4]; |
6209 | } |
6210 | } |
6211 | |
6212 | /* Port Speed Descriptor */ |
6213 | rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); |
6214 | rsp_payload->port_speed_desc.desc_len = |
6215 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); |
6216 | rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( |
6217 | qla25xx_fdmi_port_speed_capability(ha)); |
6218 | rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( |
6219 | qla25xx_fdmi_port_speed_currently(ha)); |
6220 | |
6221 | /* Link Error Status Descriptor */ |
6222 | rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); |
6223 | rsp_payload->ls_err_desc.desc_len = |
6224 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); |
6225 | |
6226 | if (stat) { |
6227 | rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); |
6228 | if (!rval) { |
6229 | rsp_payload->ls_err_desc.link_fail_cnt = |
6230 | cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); |
6231 | rsp_payload->ls_err_desc.loss_sync_cnt = |
6232 | cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); |
6233 | rsp_payload->ls_err_desc.loss_sig_cnt = |
6234 | cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); |
6235 | rsp_payload->ls_err_desc.prim_seq_err_cnt = |
6236 | cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); |
6237 | rsp_payload->ls_err_desc.inval_xmit_word_cnt = |
6238 | cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); |
6239 | rsp_payload->ls_err_desc.inval_crc_cnt = |
6240 | cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); |
6241 | rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; |
6242 | } |
6243 | } |
6244 | |
6245 | /* Portname Descriptor */ |
6246 | rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003); |
6247 | rsp_payload->port_name_diag_desc.desc_len = |
6248 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc)); |
6249 | memcpy(rsp_payload->port_name_diag_desc.WWNN, |
6250 | vha->node_name, |
6251 | sizeof(rsp_payload->port_name_diag_desc.WWNN)); |
6252 | memcpy(rsp_payload->port_name_diag_desc.WWPN, |
6253 | vha->port_name, |
6254 | sizeof(rsp_payload->port_name_diag_desc.WWPN)); |
6255 | |
6256 | /* F-Port Portname Descriptor */ |
6257 | rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003); |
6258 | rsp_payload->port_name_direct_desc.desc_len = |
6259 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc)); |
6260 | memcpy(rsp_payload->port_name_direct_desc.WWNN, |
6261 | vha->fabric_node_name, |
6262 | sizeof(rsp_payload->port_name_direct_desc.WWNN)); |
6263 | memcpy(rsp_payload->port_name_direct_desc.WWPN, |
6264 | vha->fabric_port_name, |
6265 | sizeof(rsp_payload->port_name_direct_desc.WWPN)); |
6266 | |
6267 | /* Bufer Credit Descriptor */ |
6268 | rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006); |
6269 | rsp_payload->buffer_credit_desc.desc_len = |
6270 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc)); |
6271 | rsp_payload->buffer_credit_desc.fcport_b2b = 0; |
6272 | rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0); |
6273 | rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0); |
6274 | |
6275 | if (ha->flags.plogi_template_valid) { |
6276 | uint32_t tmp = |
6277 | be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred); |
6278 | rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp); |
6279 | } |
6280 | |
6281 | if (rsp_payload_length < sizeof(*rsp_payload)) |
6282 | goto send; |
6283 | |
6284 | /* Optical Element Descriptor, Temperature */ |
6285 | rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007); |
6286 | rsp_payload->optical_elmt_desc[0].desc_len = |
6287 | cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); |
6288 | /* Optical Element Descriptor, Voltage */ |
6289 | rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007); |
6290 | rsp_payload->optical_elmt_desc[1].desc_len = |
6291 | cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); |
6292 | /* Optical Element Descriptor, Tx Bias Current */ |
6293 | rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007); |
6294 | rsp_payload->optical_elmt_desc[2].desc_len = |
6295 | cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); |
6296 | /* Optical Element Descriptor, Tx Power */ |
6297 | rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007); |
6298 | rsp_payload->optical_elmt_desc[3].desc_len = |
6299 | cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); |
6300 | /* Optical Element Descriptor, Rx Power */ |
6301 | rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); |
6302 | rsp_payload->optical_elmt_desc[4].desc_len = |
6303 | cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); |
6304 | |
6305 | if (sfp) { |
6306 | memset(sfp, 0, SFP_RTDI_LEN); |
6307 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); |
6308 | if (!rval) { |
6309 | __be16 *trx = (__force __be16 *)sfp; /* already be16 */ |
6310 | |
6311 | /* Optical Element Descriptor, Temperature */ |
6312 | rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; |
6313 | rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; |
6314 | rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; |
6315 | rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; |
6316 | rsp_payload->optical_elmt_desc[0].element_flags = |
6317 | cpu_to_be32(1 << 28); |
6318 | |
6319 | /* Optical Element Descriptor, Voltage */ |
6320 | rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; |
6321 | rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; |
6322 | rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; |
6323 | rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; |
6324 | rsp_payload->optical_elmt_desc[1].element_flags = |
6325 | cpu_to_be32(2 << 28); |
6326 | |
6327 | /* Optical Element Descriptor, Tx Bias Current */ |
6328 | rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; |
6329 | rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; |
6330 | rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; |
6331 | rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; |
6332 | rsp_payload->optical_elmt_desc[2].element_flags = |
6333 | cpu_to_be32(3 << 28); |
6334 | |
6335 | /* Optical Element Descriptor, Tx Power */ |
6336 | rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; |
6337 | rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; |
6338 | rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; |
6339 | rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; |
6340 | rsp_payload->optical_elmt_desc[3].element_flags = |
6341 | cpu_to_be32(4 << 28); |
6342 | |
6343 | /* Optical Element Descriptor, Rx Power */ |
6344 | rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; |
6345 | rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; |
6346 | rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; |
6347 | rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; |
6348 | rsp_payload->optical_elmt_desc[4].element_flags = |
6349 | cpu_to_be32(5 << 28); |
6350 | } |
6351 | |
6352 | memset(sfp, 0, SFP_RTDI_LEN); |
6353 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); |
6354 | if (!rval) { |
6355 | /* Temperature high/low alarm/warning */ |
6356 | rsp_payload->optical_elmt_desc[0].element_flags |= |
6357 | cpu_to_be32( |
6358 | (sfp[0] >> 7 & 1) << 3 | |
6359 | (sfp[0] >> 6 & 1) << 2 | |
6360 | (sfp[4] >> 7 & 1) << 1 | |
6361 | (sfp[4] >> 6 & 1) << 0); |
6362 | |
6363 | /* Voltage high/low alarm/warning */ |
6364 | rsp_payload->optical_elmt_desc[1].element_flags |= |
6365 | cpu_to_be32( |
6366 | (sfp[0] >> 5 & 1) << 3 | |
6367 | (sfp[0] >> 4 & 1) << 2 | |
6368 | (sfp[4] >> 5 & 1) << 1 | |
6369 | (sfp[4] >> 4 & 1) << 0); |
6370 | |
6371 | /* Tx Bias Current high/low alarm/warning */ |
6372 | rsp_payload->optical_elmt_desc[2].element_flags |= |
6373 | cpu_to_be32( |
6374 | (sfp[0] >> 3 & 1) << 3 | |
6375 | (sfp[0] >> 2 & 1) << 2 | |
6376 | (sfp[4] >> 3 & 1) << 1 | |
6377 | (sfp[4] >> 2 & 1) << 0); |
6378 | |
6379 | /* Tx Power high/low alarm/warning */ |
6380 | rsp_payload->optical_elmt_desc[3].element_flags |= |
6381 | cpu_to_be32( |
6382 | (sfp[0] >> 1 & 1) << 3 | |
6383 | (sfp[0] >> 0 & 1) << 2 | |
6384 | (sfp[4] >> 1 & 1) << 1 | |
6385 | (sfp[4] >> 0 & 1) << 0); |
6386 | |
6387 | /* Rx Power high/low alarm/warning */ |
6388 | rsp_payload->optical_elmt_desc[4].element_flags |= |
6389 | cpu_to_be32( |
6390 | (sfp[1] >> 7 & 1) << 3 | |
6391 | (sfp[1] >> 6 & 1) << 2 | |
6392 | (sfp[5] >> 7 & 1) << 1 | |
6393 | (sfp[5] >> 6 & 1) << 0); |
6394 | } |
6395 | } |
6396 | |
6397 | /* Optical Product Data Descriptor */ |
6398 | rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); |
6399 | rsp_payload->optical_prod_desc.desc_len = |
6400 | cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); |
6401 | |
6402 | if (sfp) { |
6403 | memset(sfp, 0, SFP_RTDI_LEN); |
6404 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); |
6405 | if (!rval) { |
6406 | memcpy(rsp_payload->optical_prod_desc.vendor_name, |
6407 | sfp + 0, |
6408 | sizeof(rsp_payload->optical_prod_desc.vendor_name)); |
6409 | memcpy(rsp_payload->optical_prod_desc.part_number, |
6410 | sfp + 20, |
6411 | sizeof(rsp_payload->optical_prod_desc.part_number)); |
6412 | memcpy(rsp_payload->optical_prod_desc.revision, |
6413 | sfp + 36, |
6414 | sizeof(rsp_payload->optical_prod_desc.revision)); |
6415 | memcpy(rsp_payload->optical_prod_desc.serial_number, |
6416 | sfp + 48, |
6417 | sizeof(rsp_payload->optical_prod_desc.serial_number)); |
6418 | } |
6419 | |
6420 | memset(sfp, 0, SFP_RTDI_LEN); |
6421 | rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); |
6422 | if (!rval) { |
6423 | memcpy(rsp_payload->optical_prod_desc.date, |
6424 | sfp + 0, |
6425 | sizeof(rsp_payload->optical_prod_desc.date)); |
6426 | } |
6427 | } |
6428 | |
6429 | send: |
6430 | ql_dbg(ql_dbg_init, vha, 0x0183, |
6431 | fmt: "Sending ELS Response to RDP Request...\n" ); |
6432 | ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, |
6433 | fmt: "-------- ELS RSP -------\n" ); |
6434 | ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, |
6435 | rsp_els, sizeof(*rsp_els)); |
6436 | ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, |
6437 | fmt: "-------- ELS RSP PAYLOAD -------\n" ); |
6438 | ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, |
6439 | rsp_payload, rsp_payload_length); |
6440 | |
6441 | rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); |
6442 | |
6443 | if (rval) { |
6444 | ql_log(ql_log_warn, vha, 0x0188, |
6445 | fmt: "%s: iocb failed to execute -> %x\n" , __func__, rval); |
6446 | } else if (rsp_els->comp_status) { |
6447 | ql_log(ql_log_warn, vha, 0x0189, |
6448 | fmt: "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n" , |
6449 | __func__, rsp_els->comp_status, |
6450 | rsp_els->error_subcode_1, rsp_els->error_subcode_2); |
6451 | } else { |
6452 | ql_dbg(ql_dbg_init, vha, 0x018a, fmt: "%s: done.\n" , __func__); |
6453 | } |
6454 | |
6455 | dealloc: |
6456 | if (stat) |
6457 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(*stat), |
6458 | cpu_addr: stat, dma_handle: stat_dma); |
6459 | if (sfp) |
6460 | dma_free_coherent(dev: &ha->pdev->dev, SFP_RTDI_LEN, |
6461 | cpu_addr: sfp, dma_handle: sfp_dma); |
6462 | if (rsp_payload) |
6463 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(*rsp_payload), |
6464 | cpu_addr: rsp_payload, dma_handle: rsp_payload_dma); |
6465 | if (rsp_els) |
6466 | dma_free_coherent(dev: &ha->pdev->dev, size: sizeof(*rsp_els), |
6467 | cpu_addr: rsp_els, dma_handle: rsp_els_dma); |
6468 | } |
6469 | |
6470 | void |
6471 | qla24xx_free_purex_item(struct purex_item *item) |
6472 | { |
6473 | if (item == &item->vha->default_item) |
6474 | memset(&item->vha->default_item, 0, sizeof(struct purex_item)); |
6475 | else |
6476 | kfree(objp: item); |
6477 | } |
6478 | |
6479 | void qla24xx_process_purex_list(struct purex_list *list) |
6480 | { |
6481 | struct list_head head = LIST_HEAD_INIT(head); |
6482 | struct purex_item *item, *next; |
6483 | ulong flags; |
6484 | |
6485 | spin_lock_irqsave(&list->lock, flags); |
6486 | list_splice_init(list: &list->head, head: &head); |
6487 | spin_unlock_irqrestore(lock: &list->lock, flags); |
6488 | |
6489 | list_for_each_entry_safe(item, next, &head, list) { |
6490 | list_del(entry: &item->list); |
6491 | item->process_item(item->vha, item); |
6492 | qla24xx_free_purex_item(item); |
6493 | } |
6494 | } |
6495 | |
6496 | /* |
6497 | * Context: task, can sleep |
6498 | */ |
6499 | void |
6500 | qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) |
6501 | { |
6502 | #if 0 |
6503 | uint16_t options = (requester_id << 15) | BIT_7; |
6504 | #endif |
6505 | uint16_t retry; |
6506 | uint32_t data; |
6507 | struct qla_hw_data *ha = base_vha->hw; |
6508 | |
6509 | might_sleep(); |
6510 | |
6511 | /* IDC-unlock implementation using driver-unlock/lock-id |
6512 | * remote registers |
6513 | */ |
6514 | retry = 0; |
6515 | retry_unlock: |
6516 | if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) |
6517 | == QLA_SUCCESS) { |
6518 | if (data == ha->portnum) { |
6519 | qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); |
6520 | /* Clearing lock-id by setting 0xff */ |
6521 | qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); |
6522 | } else if (retry < 10) { |
6523 | /* SV: XXX: IDC unlock retrying needed here? */ |
6524 | |
6525 | /* Retry for IDC-unlock */ |
6526 | msleep(QLA83XX_WAIT_LOGIC_MS); |
6527 | retry++; |
6528 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb064, |
6529 | fmt: "Failed to release IDC lock, retrying=%d\n" , retry); |
6530 | goto retry_unlock; |
6531 | } |
6532 | } else if (retry < 10) { |
6533 | /* Retry for IDC-unlock */ |
6534 | msleep(QLA83XX_WAIT_LOGIC_MS); |
6535 | retry++; |
6536 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb065, |
6537 | fmt: "Failed to read drv-lockid, retrying=%d\n" , retry); |
6538 | goto retry_unlock; |
6539 | } |
6540 | |
6541 | return; |
6542 | |
6543 | #if 0 |
6544 | /* XXX: IDC-unlock implementation using access-control mbx */ |
6545 | retry = 0; |
6546 | retry_unlock2: |
6547 | if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { |
6548 | if (retry < 10) { |
6549 | /* Retry for IDC-unlock */ |
6550 | msleep(QLA83XX_WAIT_LOGIC_MS); |
6551 | retry++; |
6552 | ql_dbg(ql_dbg_p3p, base_vha, 0xb066, |
6553 | "Failed to release IDC lock, retrying=%d\n" , retry); |
6554 | goto retry_unlock2; |
6555 | } |
6556 | } |
6557 | |
6558 | return; |
6559 | #endif |
6560 | } |
6561 | |
6562 | int |
6563 | __qla83xx_set_drv_presence(scsi_qla_host_t *vha) |
6564 | { |
6565 | int rval = QLA_SUCCESS; |
6566 | struct qla_hw_data *ha = vha->hw; |
6567 | uint32_t drv_presence; |
6568 | |
6569 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); |
6570 | if (rval == QLA_SUCCESS) { |
6571 | drv_presence |= (1 << ha->portnum); |
6572 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, |
6573 | drv_presence); |
6574 | } |
6575 | |
6576 | return rval; |
6577 | } |
6578 | |
6579 | int |
6580 | qla83xx_set_drv_presence(scsi_qla_host_t *vha) |
6581 | { |
6582 | int rval = QLA_SUCCESS; |
6583 | |
6584 | qla83xx_idc_lock(base_vha: vha, requester_id: 0); |
6585 | rval = __qla83xx_set_drv_presence(vha); |
6586 | qla83xx_idc_unlock(base_vha: vha, requester_id: 0); |
6587 | |
6588 | return rval; |
6589 | } |
6590 | |
6591 | int |
6592 | __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) |
6593 | { |
6594 | int rval = QLA_SUCCESS; |
6595 | struct qla_hw_data *ha = vha->hw; |
6596 | uint32_t drv_presence; |
6597 | |
6598 | rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); |
6599 | if (rval == QLA_SUCCESS) { |
6600 | drv_presence &= ~(1 << ha->portnum); |
6601 | rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, |
6602 | drv_presence); |
6603 | } |
6604 | |
6605 | return rval; |
6606 | } |
6607 | |
6608 | int |
6609 | qla83xx_clear_drv_presence(scsi_qla_host_t *vha) |
6610 | { |
6611 | int rval = QLA_SUCCESS; |
6612 | |
6613 | qla83xx_idc_lock(base_vha: vha, requester_id: 0); |
6614 | rval = __qla83xx_clear_drv_presence(vha); |
6615 | qla83xx_idc_unlock(base_vha: vha, requester_id: 0); |
6616 | |
6617 | return rval; |
6618 | } |
6619 | |
6620 | static void |
6621 | qla83xx_need_reset_handler(scsi_qla_host_t *vha) |
6622 | { |
6623 | struct qla_hw_data *ha = vha->hw; |
6624 | uint32_t drv_ack, drv_presence; |
6625 | unsigned long ack_timeout; |
6626 | |
6627 | /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ |
6628 | ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); |
6629 | while (1) { |
6630 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); |
6631 | qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); |
6632 | if ((drv_ack & drv_presence) == drv_presence) |
6633 | break; |
6634 | |
6635 | if (time_after_eq(jiffies, ack_timeout)) { |
6636 | ql_log(ql_log_warn, vha, 0xb067, |
6637 | fmt: "RESET ACK TIMEOUT! drv_presence=0x%x " |
6638 | "drv_ack=0x%x\n" , drv_presence, drv_ack); |
6639 | /* |
6640 | * The function(s) which did not ack in time are forced |
6641 | * to withdraw any further participation in the IDC |
6642 | * reset. |
6643 | */ |
6644 | if (drv_ack != drv_presence) |
6645 | qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, |
6646 | drv_ack); |
6647 | break; |
6648 | } |
6649 | |
6650 | qla83xx_idc_unlock(base_vha: vha, requester_id: 0); |
6651 | msleep(msecs: 1000); |
6652 | qla83xx_idc_lock(base_vha: vha, requester_id: 0); |
6653 | } |
6654 | |
6655 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); |
6656 | ql_log(ql_log_info, vha, 0xb068, fmt: "HW State: COLD/RE-INIT.\n" ); |
6657 | } |
6658 | |
6659 | static int |
6660 | qla83xx_device_bootstrap(scsi_qla_host_t *vha) |
6661 | { |
6662 | int rval = QLA_SUCCESS; |
6663 | uint32_t idc_control; |
6664 | |
6665 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); |
6666 | ql_log(ql_log_info, vha, 0xb069, fmt: "HW State: INITIALIZING.\n" ); |
6667 | |
6668 | /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ |
6669 | __qla83xx_get_idc_control(vha, &idc_control); |
6670 | idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; |
6671 | __qla83xx_set_idc_control(vha, 0); |
6672 | |
6673 | qla83xx_idc_unlock(base_vha: vha, requester_id: 0); |
6674 | rval = qla83xx_restart_nic_firmware(vha); |
6675 | qla83xx_idc_lock(base_vha: vha, requester_id: 0); |
6676 | |
6677 | if (rval != QLA_SUCCESS) { |
6678 | ql_log(ql_log_fatal, vha, 0xb06a, |
6679 | fmt: "Failed to restart NIC f/w.\n" ); |
6680 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); |
6681 | ql_log(ql_log_info, vha, 0xb06b, fmt: "HW State: FAILED.\n" ); |
6682 | } else { |
6683 | ql_dbg(ql_dbg_p3p, vha, 0xb06c, |
6684 | fmt: "Success in restarting nic f/w.\n" ); |
6685 | qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); |
6686 | ql_log(ql_log_info, vha, 0xb06d, fmt: "HW State: READY.\n" ); |
6687 | } |
6688 | |
6689 | return rval; |
6690 | } |
6691 | |
6692 | /* Assumes idc_lock always held on entry */ |
6693 | int |
6694 | qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) |
6695 | { |
6696 | struct qla_hw_data *ha = base_vha->hw; |
6697 | int rval = QLA_SUCCESS; |
6698 | unsigned long dev_init_timeout; |
6699 | uint32_t dev_state; |
6700 | |
6701 | /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ |
6702 | dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); |
6703 | |
6704 | while (1) { |
6705 | |
6706 | if (time_after_eq(jiffies, dev_init_timeout)) { |
6707 | ql_log(ql_log_warn, vha: base_vha, 0xb06e, |
6708 | fmt: "Initialization TIMEOUT!\n" ); |
6709 | /* Init timeout. Disable further NIC Core |
6710 | * communication. |
6711 | */ |
6712 | qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, |
6713 | QLA8XXX_DEV_FAILED); |
6714 | ql_log(ql_log_info, vha: base_vha, 0xb06f, |
6715 | fmt: "HW State: FAILED.\n" ); |
6716 | } |
6717 | |
6718 | qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); |
6719 | switch (dev_state) { |
6720 | case QLA8XXX_DEV_READY: |
6721 | if (ha->flags.nic_core_reset_owner) |
6722 | qla83xx_idc_audit(base_vha, |
6723 | IDC_AUDIT_COMPLETION); |
6724 | ha->flags.nic_core_reset_owner = 0; |
6725 | ql_dbg(ql_dbg_p3p, vha: base_vha, 0xb070, |
6726 | fmt: "Reset_owner reset by 0x%x.\n" , |
6727 | ha->portnum); |
6728 | goto exit; |
6729 | case QLA8XXX_DEV_COLD: |
6730 | if (ha->flags.nic_core_reset_owner) |
6731 | rval = qla83xx_device_bootstrap(vha: base_vha); |
6732 | else { |
6733 | /* Wait for AEN to change device-state */ |
6734 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6735 | msleep(msecs: 1000); |
6736 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6737 | } |
6738 | break; |
6739 | case QLA8XXX_DEV_INITIALIZING: |
6740 | /* Wait for AEN to change device-state */ |
6741 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6742 | msleep(msecs: 1000); |
6743 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6744 | break; |
6745 | case QLA8XXX_DEV_NEED_RESET: |
6746 | if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) |
6747 | qla83xx_need_reset_handler(vha: base_vha); |
6748 | else { |
6749 | /* Wait for AEN to change device-state */ |
6750 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6751 | msleep(msecs: 1000); |
6752 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6753 | } |
6754 | /* reset timeout value after need reset handler */ |
6755 | dev_init_timeout = jiffies + |
6756 | (ha->fcoe_dev_init_timeout * HZ); |
6757 | break; |
6758 | case QLA8XXX_DEV_NEED_QUIESCENT: |
6759 | /* XXX: DEBUG for now */ |
6760 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6761 | msleep(msecs: 1000); |
6762 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6763 | break; |
6764 | case QLA8XXX_DEV_QUIESCENT: |
6765 | /* XXX: DEBUG for now */ |
6766 | if (ha->flags.quiesce_owner) |
6767 | goto exit; |
6768 | |
6769 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6770 | msleep(msecs: 1000); |
6771 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6772 | dev_init_timeout = jiffies + |
6773 | (ha->fcoe_dev_init_timeout * HZ); |
6774 | break; |
6775 | case QLA8XXX_DEV_FAILED: |
6776 | if (ha->flags.nic_core_reset_owner) |
6777 | qla83xx_idc_audit(base_vha, |
6778 | IDC_AUDIT_COMPLETION); |
6779 | ha->flags.nic_core_reset_owner = 0; |
6780 | __qla83xx_clear_drv_presence(vha: base_vha); |
6781 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6782 | qla8xxx_dev_failed_handler(base_vha); |
6783 | rval = QLA_FUNCTION_FAILED; |
6784 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6785 | goto exit; |
6786 | case QLA8XXX_BAD_VALUE: |
6787 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6788 | msleep(msecs: 1000); |
6789 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6790 | break; |
6791 | default: |
6792 | ql_log(ql_log_warn, vha: base_vha, 0xb071, |
6793 | fmt: "Unknown Device State: %x.\n" , dev_state); |
6794 | qla83xx_idc_unlock(base_vha, requester_id: 0); |
6795 | qla8xxx_dev_failed_handler(base_vha); |
6796 | rval = QLA_FUNCTION_FAILED; |
6797 | qla83xx_idc_lock(base_vha, requester_id: 0); |
6798 | goto exit; |
6799 | } |
6800 | } |
6801 | |
6802 | exit: |
6803 | return rval; |
6804 | } |
6805 | |
6806 | void |
6807 | qla2x00_disable_board_on_pci_error(struct work_struct *work) |
6808 | { |
6809 | struct qla_hw_data *ha = container_of(work, struct qla_hw_data, |
6810 | board_disable); |
6811 | struct pci_dev *pdev = ha->pdev; |
6812 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
6813 | |
6814 | ql_log(ql_log_warn, vha: base_vha, 0x015b, |
6815 | fmt: "Disabling adapter.\n" ); |
6816 | |
6817 | if (!atomic_read(v: &pdev->enable_cnt)) { |
6818 | ql_log(ql_log_info, vha: base_vha, 0xfffc, |
6819 | fmt: "PCI device disabled, no action req for PCI error=%lx\n" , |
6820 | base_vha->pci_flags); |
6821 | return; |
6822 | } |
6823 | |
6824 | /* |
6825 | * if UNLOADING flag is already set, then continue unload, |
6826 | * where it was set first. |
6827 | */ |
6828 | if (test_and_set_bit(UNLOADING, addr: &base_vha->dpc_flags)) |
6829 | return; |
6830 | |
6831 | qla2x00_wait_for_sess_deletion(vha: base_vha); |
6832 | |
6833 | qla2x00_delete_all_vps(ha, base_vha); |
6834 | |
6835 | qla2x00_abort_all_cmds(vha: base_vha, res: DID_NO_CONNECT << 16); |
6836 | |
6837 | qla2x00_dfs_remove(base_vha); |
6838 | |
6839 | qla84xx_put_chip(base_vha); |
6840 | |
6841 | if (base_vha->timer_active) |
6842 | qla2x00_stop_timer(vha: base_vha); |
6843 | |
6844 | base_vha->flags.online = 0; |
6845 | |
6846 | qla2x00_destroy_deferred_work(ha); |
6847 | |
6848 | /* |
6849 | * Do not try to stop beacon blink as it will issue a mailbox |
6850 | * command. |
6851 | */ |
6852 | qla2x00_free_sysfs_attr(base_vha, false); |
6853 | |
6854 | fc_remove_host(base_vha->host); |
6855 | |
6856 | scsi_remove_host(base_vha->host); |
6857 | |
6858 | base_vha->flags.init_done = 0; |
6859 | qla25xx_delete_queues(base_vha); |
6860 | qla2x00_free_fcports(vha: base_vha); |
6861 | qla2x00_free_irqs(base_vha); |
6862 | qla2x00_mem_free(ha); |
6863 | qla82xx_md_free(base_vha); |
6864 | qla2x00_free_queues(ha); |
6865 | |
6866 | qla2x00_unmap_iobases(ha); |
6867 | |
6868 | pci_release_selected_regions(ha->pdev, ha->bars); |
6869 | pci_disable_device(dev: pdev); |
6870 | |
6871 | /* |
6872 | * Let qla2x00_remove_one cleanup qla_hw_data on device removal. |
6873 | */ |
6874 | } |
6875 | |
6876 | /************************************************************************** |
6877 | * qla2x00_do_dpc |
6878 | * This kernel thread is a task that is schedule by the interrupt handler |
6879 | * to perform the background processing for interrupts. |
6880 | * |
6881 | * Notes: |
6882 | * This task always run in the context of a kernel thread. It |
6883 | * is kick-off by the driver's detect code and starts up |
6884 | * up one per adapter. It immediately goes to sleep and waits for |
6885 | * some fibre event. When either the interrupt handler or |
6886 | * the timer routine detects a event it will one of the task |
6887 | * bits then wake us up. |
6888 | **************************************************************************/ |
6889 | static int |
6890 | qla2x00_do_dpc(void *data) |
6891 | { |
6892 | scsi_qla_host_t *base_vha; |
6893 | struct qla_hw_data *ha; |
6894 | uint32_t online; |
6895 | struct qla_qpair *qpair; |
6896 | |
6897 | ha = (struct qla_hw_data *)data; |
6898 | base_vha = pci_get_drvdata(pdev: ha->pdev); |
6899 | |
6900 | set_user_nice(current, MIN_NICE); |
6901 | |
6902 | set_current_state(TASK_INTERRUPTIBLE); |
6903 | while (!kthread_should_stop()) { |
6904 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4000, |
6905 | fmt: "DPC handler sleeping.\n" ); |
6906 | |
6907 | schedule(); |
6908 | |
6909 | if (test_and_clear_bit(DO_EEH_RECOVERY, addr: &base_vha->dpc_flags)) |
6910 | qla_pci_set_eeh_busy(base_vha); |
6911 | |
6912 | if (!base_vha->flags.init_done || ha->flags.mbox_busy) |
6913 | goto end_loop; |
6914 | |
6915 | if (ha->flags.eeh_busy) { |
6916 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4003, |
6917 | fmt: "eeh_busy=%d.\n" , ha->flags.eeh_busy); |
6918 | goto end_loop; |
6919 | } |
6920 | |
6921 | ha->dpc_active = 1; |
6922 | |
6923 | ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha: base_vha, 0x4001, |
6924 | fmt: "DPC handler waking up, dpc_flags=0x%lx.\n" , |
6925 | base_vha->dpc_flags); |
6926 | |
6927 | if (test_bit(UNLOADING, &base_vha->dpc_flags)) |
6928 | break; |
6929 | |
6930 | if (IS_P3P_TYPE(ha)) { |
6931 | if (IS_QLA8044(ha)) { |
6932 | if (test_and_clear_bit(ISP_UNRECOVERABLE, |
6933 | addr: &base_vha->dpc_flags)) { |
6934 | qla8044_idc_lock(ha); |
6935 | qla8044_wr_direct(vha: base_vha, |
6936 | crb_reg: QLA8044_CRB_DEV_STATE_INDEX, |
6937 | value: QLA8XXX_DEV_FAILED); |
6938 | qla8044_idc_unlock(ha); |
6939 | ql_log(ql_log_info, vha: base_vha, 0x4004, |
6940 | fmt: "HW State: FAILED.\n" ); |
6941 | qla8044_device_state_handler(vha: base_vha); |
6942 | continue; |
6943 | } |
6944 | |
6945 | } else { |
6946 | if (test_and_clear_bit(ISP_UNRECOVERABLE, |
6947 | addr: &base_vha->dpc_flags)) { |
6948 | qla82xx_idc_lock(ha); |
6949 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, |
6950 | QLA8XXX_DEV_FAILED); |
6951 | qla82xx_idc_unlock(ha); |
6952 | ql_log(ql_log_info, vha: base_vha, 0x0151, |
6953 | fmt: "HW State: FAILED.\n" ); |
6954 | qla82xx_device_state_handler(base_vha); |
6955 | continue; |
6956 | } |
6957 | } |
6958 | |
6959 | if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, |
6960 | addr: &base_vha->dpc_flags)) { |
6961 | |
6962 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4005, |
6963 | fmt: "FCoE context reset scheduled.\n" ); |
6964 | if (!(test_and_set_bit(ABORT_ISP_ACTIVE, |
6965 | addr: &base_vha->dpc_flags))) { |
6966 | if (qla82xx_fcoe_ctx_reset(base_vha)) { |
6967 | /* FCoE-ctx reset failed. |
6968 | * Escalate to chip-reset |
6969 | */ |
6970 | set_bit(ISP_ABORT_NEEDED, |
6971 | addr: &base_vha->dpc_flags); |
6972 | } |
6973 | clear_bit(ABORT_ISP_ACTIVE, |
6974 | addr: &base_vha->dpc_flags); |
6975 | } |
6976 | |
6977 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4006, |
6978 | fmt: "FCoE context reset end.\n" ); |
6979 | } |
6980 | } else if (IS_QLAFX00(ha)) { |
6981 | if (test_and_clear_bit(ISP_UNRECOVERABLE, |
6982 | addr: &base_vha->dpc_flags)) { |
6983 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4020, |
6984 | fmt: "Firmware Reset Recovery\n" ); |
6985 | if (qlafx00_reset_initialize(base_vha)) { |
6986 | /* Failed. Abort isp later. */ |
6987 | if (!test_bit(UNLOADING, |
6988 | &base_vha->dpc_flags)) { |
6989 | set_bit(ISP_UNRECOVERABLE, |
6990 | addr: &base_vha->dpc_flags); |
6991 | ql_dbg(ql_dbg_dpc, vha: base_vha, |
6992 | 0x4021, |
6993 | fmt: "Reset Recovery Failed\n" ); |
6994 | } |
6995 | } |
6996 | } |
6997 | |
6998 | if (test_and_clear_bit(FX00_TARGET_SCAN, |
6999 | addr: &base_vha->dpc_flags)) { |
7000 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4022, |
7001 | fmt: "ISPFx00 Target Scan scheduled\n" ); |
7002 | if (qlafx00_rescan_isp(base_vha)) { |
7003 | if (!test_bit(UNLOADING, |
7004 | &base_vha->dpc_flags)) |
7005 | set_bit(ISP_UNRECOVERABLE, |
7006 | addr: &base_vha->dpc_flags); |
7007 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x401e, |
7008 | fmt: "ISPFx00 Target Scan Failed\n" ); |
7009 | } |
7010 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x401f, |
7011 | fmt: "ISPFx00 Target Scan End\n" ); |
7012 | } |
7013 | if (test_and_clear_bit(FX00_HOST_INFO_RESEND, |
7014 | addr: &base_vha->dpc_flags)) { |
7015 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4023, |
7016 | fmt: "ISPFx00 Host Info resend scheduled\n" ); |
7017 | qlafx00_fx_disc(base_vha, |
7018 | &base_vha->hw->mr.fcport, |
7019 | FXDISC_REG_HOST_INFO); |
7020 | } |
7021 | } |
7022 | |
7023 | if (test_and_clear_bit(DETECT_SFP_CHANGE, |
7024 | addr: &base_vha->dpc_flags)) { |
7025 | /* Semantic: |
7026 | * - NO-OP -- await next ISP-ABORT. Preferred method |
7027 | * to minimize disruptions that will occur |
7028 | * when a forced chip-reset occurs. |
7029 | * - Force -- ISP-ABORT scheduled. |
7030 | */ |
7031 | /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */ |
7032 | } |
7033 | |
7034 | if (test_and_clear_bit |
7035 | (ISP_ABORT_NEEDED, addr: &base_vha->dpc_flags) && |
7036 | !test_bit(UNLOADING, &base_vha->dpc_flags)) { |
7037 | bool do_reset = true; |
7038 | |
7039 | switch (base_vha->qlini_mode) { |
7040 | case QLA2XXX_INI_MODE_ENABLED: |
7041 | break; |
7042 | case QLA2XXX_INI_MODE_DISABLED: |
7043 | if (!qla_tgt_mode_enabled(ha: base_vha) && |
7044 | !ha->flags.fw_started) |
7045 | do_reset = false; |
7046 | break; |
7047 | case QLA2XXX_INI_MODE_DUAL: |
7048 | if (!qla_dual_mode_enabled(ha: base_vha) && |
7049 | !ha->flags.fw_started) |
7050 | do_reset = false; |
7051 | break; |
7052 | default: |
7053 | break; |
7054 | } |
7055 | |
7056 | if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, |
7057 | addr: &base_vha->dpc_flags))) { |
7058 | base_vha->flags.online = 1; |
7059 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4007, |
7060 | fmt: "ISP abort scheduled.\n" ); |
7061 | if (ha->isp_ops->abort_isp(base_vha)) { |
7062 | /* failed. retry later */ |
7063 | set_bit(ISP_ABORT_NEEDED, |
7064 | addr: &base_vha->dpc_flags); |
7065 | } |
7066 | clear_bit(ABORT_ISP_ACTIVE, |
7067 | addr: &base_vha->dpc_flags); |
7068 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4008, |
7069 | fmt: "ISP abort end.\n" ); |
7070 | } |
7071 | } |
7072 | |
7073 | if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) { |
7074 | if (atomic_read(v: &base_vha->loop_state) == LOOP_READY) { |
7075 | qla24xx_process_purex_list |
7076 | (list: &base_vha->purex_list); |
7077 | clear_bit(PROCESS_PUREX_IOCB, |
7078 | addr: &base_vha->dpc_flags); |
7079 | } |
7080 | } |
7081 | |
7082 | if (IS_QLAFX00(ha)) |
7083 | goto loop_resync_check; |
7084 | |
7085 | if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { |
7086 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4009, |
7087 | fmt: "Quiescence mode scheduled.\n" ); |
7088 | if (IS_P3P_TYPE(ha)) { |
7089 | if (IS_QLA82XX(ha)) |
7090 | qla82xx_device_state_handler(base_vha); |
7091 | if (IS_QLA8044(ha)) |
7092 | qla8044_device_state_handler(vha: base_vha); |
7093 | clear_bit(ISP_QUIESCE_NEEDED, |
7094 | addr: &base_vha->dpc_flags); |
7095 | if (!ha->flags.quiesce_owner) { |
7096 | qla2x00_perform_loop_resync(base_vha); |
7097 | if (IS_QLA82XX(ha)) { |
7098 | qla82xx_idc_lock(ha); |
7099 | qla82xx_clear_qsnt_ready( |
7100 | base_vha); |
7101 | qla82xx_idc_unlock(ha); |
7102 | } else if (IS_QLA8044(ha)) { |
7103 | qla8044_idc_lock(ha); |
7104 | qla8044_clear_qsnt_ready( |
7105 | vha: base_vha); |
7106 | qla8044_idc_unlock(ha); |
7107 | } |
7108 | } |
7109 | } else { |
7110 | clear_bit(ISP_QUIESCE_NEEDED, |
7111 | addr: &base_vha->dpc_flags); |
7112 | qla2x00_quiesce_io(base_vha); |
7113 | } |
7114 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x400a, |
7115 | fmt: "Quiescence mode end.\n" ); |
7116 | } |
7117 | |
7118 | if (test_and_clear_bit(RESET_MARKER_NEEDED, |
7119 | addr: &base_vha->dpc_flags) && |
7120 | (!(test_and_set_bit(RESET_ACTIVE, addr: &base_vha->dpc_flags)))) { |
7121 | |
7122 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x400b, |
7123 | fmt: "Reset marker scheduled.\n" ); |
7124 | qla2x00_rst_aen(base_vha); |
7125 | clear_bit(RESET_ACTIVE, addr: &base_vha->dpc_flags); |
7126 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x400c, |
7127 | fmt: "Reset marker end.\n" ); |
7128 | } |
7129 | |
7130 | /* Retry each device up to login retry count */ |
7131 | if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && |
7132 | !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && |
7133 | atomic_read(v: &base_vha->loop_state) != LOOP_DOWN) { |
7134 | |
7135 | if (!base_vha->relogin_jif || |
7136 | time_after_eq(jiffies, base_vha->relogin_jif)) { |
7137 | base_vha->relogin_jif = jiffies + HZ; |
7138 | clear_bit(RELOGIN_NEEDED, addr: &base_vha->dpc_flags); |
7139 | |
7140 | ql_dbg(ql_dbg_disc, vha: base_vha, 0x400d, |
7141 | fmt: "Relogin scheduled.\n" ); |
7142 | qla24xx_post_relogin_work(vha: base_vha); |
7143 | } |
7144 | } |
7145 | loop_resync_check: |
7146 | if (!qla2x00_reset_active(vha: base_vha) && |
7147 | test_and_clear_bit(LOOP_RESYNC_NEEDED, |
7148 | addr: &base_vha->dpc_flags)) { |
7149 | /* |
7150 | * Allow abort_isp to complete before moving on to scanning. |
7151 | */ |
7152 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x400f, |
7153 | fmt: "Loop resync scheduled.\n" ); |
7154 | |
7155 | if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, |
7156 | addr: &base_vha->dpc_flags))) { |
7157 | |
7158 | qla2x00_loop_resync(base_vha); |
7159 | |
7160 | clear_bit(LOOP_RESYNC_ACTIVE, |
7161 | addr: &base_vha->dpc_flags); |
7162 | } |
7163 | |
7164 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4010, |
7165 | fmt: "Loop resync end.\n" ); |
7166 | } |
7167 | |
7168 | if (IS_QLAFX00(ha)) |
7169 | goto intr_on_check; |
7170 | |
7171 | if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && |
7172 | atomic_read(v: &base_vha->loop_state) == LOOP_READY) { |
7173 | clear_bit(NPIV_CONFIG_NEEDED, addr: &base_vha->dpc_flags); |
7174 | qla2xxx_flash_npiv_conf(base_vha); |
7175 | } |
7176 | |
7177 | intr_on_check: |
7178 | if (!ha->interrupts_on) |
7179 | ha->isp_ops->enable_intrs(ha); |
7180 | |
7181 | if (test_and_clear_bit(BEACON_BLINK_NEEDED, |
7182 | addr: &base_vha->dpc_flags)) { |
7183 | if (ha->beacon_blink_led == 1) |
7184 | ha->isp_ops->beacon_blink(base_vha); |
7185 | } |
7186 | |
7187 | /* qpair online check */ |
7188 | if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, |
7189 | addr: &base_vha->dpc_flags)) { |
7190 | if (ha->flags.eeh_busy || |
7191 | ha->flags.pci_channel_io_perm_failure) |
7192 | online = 0; |
7193 | else |
7194 | online = 1; |
7195 | |
7196 | mutex_lock(&ha->mq_lock); |
7197 | list_for_each_entry(qpair, &base_vha->qp_list, |
7198 | qp_list_elem) |
7199 | qpair->online = online; |
7200 | mutex_unlock(lock: &ha->mq_lock); |
7201 | } |
7202 | |
7203 | if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, |
7204 | addr: &base_vha->dpc_flags)) { |
7205 | u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; |
7206 | |
7207 | if (threshold > ha->orig_fw_xcb_count) |
7208 | threshold = ha->orig_fw_xcb_count; |
7209 | |
7210 | ql_log(ql_log_info, vha: base_vha, 0xffffff, |
7211 | fmt: "SET ZIO Activity exchange threshold to %d.\n" , |
7212 | threshold); |
7213 | if (qla27xx_set_zio_threshold(base_vha, threshold)) { |
7214 | ql_log(ql_log_info, vha: base_vha, 0xffffff, |
7215 | fmt: "Unable to SET ZIO Activity exchange threshold to %d.\n" , |
7216 | threshold); |
7217 | } |
7218 | } |
7219 | |
7220 | if (!IS_QLAFX00(ha)) |
7221 | qla2x00_do_dpc_all_vps(base_vha); |
7222 | |
7223 | if (test_and_clear_bit(N2N_LINK_RESET, |
7224 | addr: &base_vha->dpc_flags)) { |
7225 | qla2x00_lip_reset(base_vha); |
7226 | } |
7227 | |
7228 | ha->dpc_active = 0; |
7229 | end_loop: |
7230 | set_current_state(TASK_INTERRUPTIBLE); |
7231 | } /* End of while(1) */ |
7232 | __set_current_state(TASK_RUNNING); |
7233 | |
7234 | ql_dbg(ql_dbg_dpc, vha: base_vha, 0x4011, |
7235 | fmt: "DPC handler exiting.\n" ); |
7236 | |
7237 | /* |
7238 | * Make sure that nobody tries to wake us up again. |
7239 | */ |
7240 | ha->dpc_active = 0; |
7241 | |
7242 | /* Cleanup any residual CTX SRBs. */ |
7243 | qla2x00_abort_all_cmds(vha: base_vha, res: DID_NO_CONNECT << 16); |
7244 | |
7245 | return 0; |
7246 | } |
7247 | |
7248 | void |
7249 | qla2xxx_wake_dpc(struct scsi_qla_host *vha) |
7250 | { |
7251 | struct qla_hw_data *ha = vha->hw; |
7252 | struct task_struct *t = ha->dpc_thread; |
7253 | |
7254 | if (!test_bit(UNLOADING, &vha->dpc_flags) && t) |
7255 | wake_up_process(tsk: t); |
7256 | } |
7257 | |
7258 | /* |
7259 | * qla2x00_rst_aen |
7260 | * Processes asynchronous reset. |
7261 | * |
7262 | * Input: |
7263 | * ha = adapter block pointer. |
7264 | */ |
7265 | static void |
7266 | qla2x00_rst_aen(scsi_qla_host_t *vha) |
7267 | { |
7268 | if (vha->flags.online && !vha->flags.reset_active && |
7269 | !atomic_read(v: &vha->loop_down_timer) && |
7270 | !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { |
7271 | do { |
7272 | clear_bit(RESET_MARKER_NEEDED, addr: &vha->dpc_flags); |
7273 | |
7274 | /* |
7275 | * Issue marker command only when we are going to start |
7276 | * the I/O. |
7277 | */ |
7278 | vha->marker_needed = 1; |
7279 | } while (!atomic_read(v: &vha->loop_down_timer) && |
7280 | (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); |
7281 | } |
7282 | } |
7283 | |
7284 | static bool qla_do_heartbeat(struct scsi_qla_host *vha) |
7285 | { |
7286 | struct qla_hw_data *ha = vha->hw; |
7287 | u32 cmpl_cnt; |
7288 | u16 i; |
7289 | bool do_heartbeat = false; |
7290 | |
7291 | /* |
7292 | * Allow do_heartbeat only if we don’t have any active interrupts, |
7293 | * but there are still IOs outstanding with firmware. |
7294 | */ |
7295 | cmpl_cnt = ha->base_qpair->cmd_completion_cnt; |
7296 | if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && |
7297 | cmpl_cnt != ha->base_qpair->cmd_cnt) { |
7298 | do_heartbeat = true; |
7299 | goto skip; |
7300 | } |
7301 | ha->base_qpair->prev_completion_cnt = cmpl_cnt; |
7302 | |
7303 | for (i = 0; i < ha->max_qpairs; i++) { |
7304 | if (ha->queue_pair_map[i]) { |
7305 | cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; |
7306 | if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && |
7307 | cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { |
7308 | do_heartbeat = true; |
7309 | break; |
7310 | } |
7311 | ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; |
7312 | } |
7313 | } |
7314 | |
7315 | skip: |
7316 | return do_heartbeat; |
7317 | } |
7318 | |
7319 | static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started) |
7320 | { |
7321 | struct qla_hw_data *ha = vha->hw; |
7322 | |
7323 | if (vha->vp_idx) |
7324 | return; |
7325 | |
7326 | if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) |
7327 | return; |
7328 | |
7329 | /* |
7330 | * dpc thread cannot run if heartbeat is running at the same time. |
7331 | * We also do not want to starve heartbeat task. Therefore, do |
7332 | * heartbeat task at least once every 5 seconds. |
7333 | */ |
7334 | if (dpc_started && |
7335 | time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ)) |
7336 | return; |
7337 | |
7338 | if (qla_do_heartbeat(vha)) { |
7339 | ha->last_heartbeat_run_jiffies = jiffies; |
7340 | queue_work(wq: ha->wq, work: &ha->heartbeat_work); |
7341 | } |
7342 | } |
7343 | |
7344 | static void qla_wind_down_chip(scsi_qla_host_t *vha) |
7345 | { |
7346 | struct qla_hw_data *ha = vha->hw; |
7347 | |
7348 | if (!ha->flags.eeh_busy) |
7349 | return; |
7350 | if (ha->pci_error_state) |
7351 | /* system is trying to recover */ |
7352 | return; |
7353 | |
7354 | /* |
7355 | * Current system is not handling PCIE error. At this point, this is |
7356 | * best effort to wind down the adapter. |
7357 | */ |
7358 | if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && |
7359 | !ha->flags.eeh_flush) { |
7360 | ql_log(ql_log_info, vha, 0x9009, |
7361 | fmt: "PCI Error detected, attempting to reset hardware.\n" ); |
7362 | |
7363 | ha->isp_ops->reset_chip(vha); |
7364 | ha->isp_ops->disable_intrs(ha); |
7365 | |
7366 | ha->flags.eeh_flush = EEH_FLUSH_RDY; |
7367 | ha->eeh_jif = jiffies; |
7368 | |
7369 | } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && |
7370 | time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { |
7371 | pci_clear_master(dev: ha->pdev); |
7372 | |
7373 | /* flush all command */ |
7374 | qla2x00_abort_isp_cleanup(vha); |
7375 | ha->flags.eeh_flush = EEH_FLUSH_DONE; |
7376 | |
7377 | ql_log(ql_log_info, vha, 0x900a, |
7378 | fmt: "PCI Error handling complete, all IOs aborted.\n" ); |
7379 | } |
7380 | } |
7381 | |
7382 | /************************************************************************** |
7383 | * qla2x00_timer |
7384 | * |
7385 | * Description: |
7386 | * One second timer |
7387 | * |
7388 | * Context: Interrupt |
7389 | ***************************************************************************/ |
7390 | void |
7391 | qla2x00_timer(struct timer_list *t) |
7392 | { |
7393 | scsi_qla_host_t *vha = from_timer(vha, t, timer); |
7394 | unsigned long cpu_flags = 0; |
7395 | int start_dpc = 0; |
7396 | int index; |
7397 | srb_t *sp; |
7398 | uint16_t w; |
7399 | struct qla_hw_data *ha = vha->hw; |
7400 | struct req_que *req; |
7401 | unsigned long flags; |
7402 | fc_port_t *fcport = NULL; |
7403 | |
7404 | if (ha->flags.eeh_busy) { |
7405 | qla_wind_down_chip(vha); |
7406 | |
7407 | ql_dbg(ql_dbg_timer, vha, 0x6000, |
7408 | fmt: "EEH = %d, restarting timer.\n" , |
7409 | ha->flags.eeh_busy); |
7410 | qla2x00_restart_timer(vha, WATCH_INTERVAL); |
7411 | return; |
7412 | } |
7413 | |
7414 | /* |
7415 | * Hardware read to raise pending EEH errors during mailbox waits. If |
7416 | * the read returns -1 then disable the board. |
7417 | */ |
7418 | if (!pci_channel_offline(pdev: ha->pdev)) { |
7419 | pci_read_config_word(dev: ha->pdev, PCI_VENDOR_ID, val: &w); |
7420 | qla2x00_check_reg16_for_disconnect(vha, w); |
7421 | } |
7422 | |
7423 | /* Make sure qla82xx_watchdog is run only for physical port */ |
7424 | if (!vha->vp_idx && IS_P3P_TYPE(ha)) { |
7425 | if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) |
7426 | start_dpc++; |
7427 | if (IS_QLA82XX(ha)) |
7428 | qla82xx_watchdog(vha); |
7429 | else if (IS_QLA8044(ha)) |
7430 | qla8044_watchdog(vha); |
7431 | } |
7432 | |
7433 | if (!vha->vp_idx && IS_QLAFX00(ha)) |
7434 | qlafx00_timer_routine(vha); |
7435 | |
7436 | if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) |
7437 | vha->link_down_time++; |
7438 | |
7439 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
7440 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
7441 | if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) |
7442 | fcport->tgt_link_down_time++; |
7443 | } |
7444 | spin_unlock_irqrestore(lock: &vha->hw->tgt.sess_lock, flags); |
7445 | |
7446 | /* Loop down handler. */ |
7447 | if (atomic_read(v: &vha->loop_down_timer) > 0 && |
7448 | !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && |
7449 | !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) |
7450 | && vha->flags.online) { |
7451 | |
7452 | if (atomic_read(v: &vha->loop_down_timer) == |
7453 | vha->loop_down_abort_time) { |
7454 | |
7455 | ql_log(ql_log_info, vha, 0x6008, |
7456 | fmt: "Loop down - aborting the queues before time expires.\n" ); |
7457 | |
7458 | if (!IS_QLA2100(ha) && vha->link_down_timeout) |
7459 | atomic_set(v: &vha->loop_state, LOOP_DEAD); |
7460 | |
7461 | /* |
7462 | * Schedule an ISP abort to return any FCP2-device |
7463 | * commands. |
7464 | */ |
7465 | /* NPIV - scan physical port only */ |
7466 | if (!vha->vp_idx) { |
7467 | spin_lock_irqsave(&ha->hardware_lock, |
7468 | cpu_flags); |
7469 | req = ha->req_q_map[0]; |
7470 | for (index = 1; |
7471 | index < req->num_outstanding_cmds; |
7472 | index++) { |
7473 | fc_port_t *sfcp; |
7474 | |
7475 | sp = req->outstanding_cmds[index]; |
7476 | if (!sp) |
7477 | continue; |
7478 | if (sp->cmd_type != TYPE_SRB) |
7479 | continue; |
7480 | if (sp->type != SRB_SCSI_CMD) |
7481 | continue; |
7482 | sfcp = sp->fcport; |
7483 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) |
7484 | continue; |
7485 | |
7486 | if (IS_QLA82XX(ha)) |
7487 | set_bit(FCOE_CTX_RESET_NEEDED, |
7488 | addr: &vha->dpc_flags); |
7489 | else |
7490 | set_bit(ISP_ABORT_NEEDED, |
7491 | addr: &vha->dpc_flags); |
7492 | break; |
7493 | } |
7494 | spin_unlock_irqrestore(lock: &ha->hardware_lock, |
7495 | flags: cpu_flags); |
7496 | } |
7497 | start_dpc++; |
7498 | } |
7499 | |
7500 | /* if the loop has been down for 4 minutes, reinit adapter */ |
7501 | if (atomic_dec_and_test(v: &vha->loop_down_timer) != 0) { |
7502 | if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { |
7503 | ql_log(ql_log_warn, vha, 0x6009, |
7504 | fmt: "Loop down - aborting ISP.\n" ); |
7505 | |
7506 | if (IS_QLA82XX(ha)) |
7507 | set_bit(FCOE_CTX_RESET_NEEDED, |
7508 | addr: &vha->dpc_flags); |
7509 | else |
7510 | set_bit(ISP_ABORT_NEEDED, |
7511 | addr: &vha->dpc_flags); |
7512 | } |
7513 | } |
7514 | ql_dbg(ql_dbg_timer, vha, 0x600a, |
7515 | fmt: "Loop down - seconds remaining %d.\n" , |
7516 | atomic_read(v: &vha->loop_down_timer)); |
7517 | } |
7518 | /* Check if beacon LED needs to be blinked for physical host only */ |
7519 | if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { |
7520 | /* There is no beacon_blink function for ISP82xx */ |
7521 | if (!IS_P3P_TYPE(ha)) { |
7522 | set_bit(BEACON_BLINK_NEEDED, addr: &vha->dpc_flags); |
7523 | start_dpc++; |
7524 | } |
7525 | } |
7526 | |
7527 | /* check if edif running */ |
7528 | if (vha->hw->flags.edif_enabled) |
7529 | qla_edif_timer(vha); |
7530 | |
7531 | /* Process any deferred work. */ |
7532 | if (!list_empty(head: &vha->work_list)) { |
7533 | unsigned long flags; |
7534 | bool q = false; |
7535 | |
7536 | spin_lock_irqsave(&vha->work_lock, flags); |
7537 | if (!test_and_set_bit(IOCB_WORK_ACTIVE, addr: &vha->dpc_flags)) |
7538 | q = true; |
7539 | spin_unlock_irqrestore(lock: &vha->work_lock, flags); |
7540 | if (q) |
7541 | queue_work(wq: vha->hw->wq, work: &vha->iocb_work); |
7542 | } |
7543 | |
7544 | /* |
7545 | * FC-NVME |
7546 | * see if the active AEN count has changed from what was last reported. |
7547 | */ |
7548 | index = atomic_read(v: &ha->nvme_active_aen_cnt); |
7549 | if (!vha->vp_idx && |
7550 | (index != ha->nvme_last_rptd_aen) && |
7551 | ha->zio_mode == QLA_ZIO_MODE_6 && |
7552 | !ha->flags.host_shutting_down) { |
7553 | ha->nvme_last_rptd_aen = atomic_read(v: &ha->nvme_active_aen_cnt); |
7554 | ql_log(ql_log_info, vha, 0x3002, |
7555 | fmt: "nvme: Sched: Set ZIO exchange threshold to %d.\n" , |
7556 | ha->nvme_last_rptd_aen); |
7557 | set_bit(SET_ZIO_THRESHOLD_NEEDED, addr: &vha->dpc_flags); |
7558 | start_dpc++; |
7559 | } |
7560 | |
7561 | if (!vha->vp_idx && |
7562 | atomic_read(v: &ha->zio_threshold) != ha->last_zio_threshold && |
7563 | IS_ZIO_THRESHOLD_CAPABLE(ha)) { |
7564 | ql_log(ql_log_info, vha, 0x3002, |
7565 | fmt: "Sched: Set ZIO exchange threshold to %d.\n" , |
7566 | ha->last_zio_threshold); |
7567 | ha->last_zio_threshold = atomic_read(v: &ha->zio_threshold); |
7568 | set_bit(SET_ZIO_THRESHOLD_NEEDED, addr: &vha->dpc_flags); |
7569 | start_dpc++; |
7570 | } |
7571 | qla_adjust_buf(vha); |
7572 | |
7573 | /* borrowing w to signify dpc will run */ |
7574 | w = 0; |
7575 | /* Schedule the DPC routine if needed */ |
7576 | if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || |
7577 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || |
7578 | start_dpc || |
7579 | test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || |
7580 | test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || |
7581 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || |
7582 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || |
7583 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || |
7584 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || |
7585 | test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { |
7586 | ql_dbg(ql_dbg_timer, vha, 0x600b, |
7587 | fmt: "isp_abort_needed=%d loop_resync_needed=%d " |
7588 | "start_dpc=%d reset_marker_needed=%d" , |
7589 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), |
7590 | test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), |
7591 | start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); |
7592 | ql_dbg(ql_dbg_timer, vha, 0x600c, |
7593 | fmt: "beacon_blink_needed=%d isp_unrecoverable=%d " |
7594 | "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " |
7595 | "relogin_needed=%d, Process_purex_iocb=%d.\n" , |
7596 | test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), |
7597 | test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), |
7598 | test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), |
7599 | test_bit(VP_DPC_NEEDED, &vha->dpc_flags), |
7600 | test_bit(RELOGIN_NEEDED, &vha->dpc_flags), |
7601 | test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); |
7602 | qla2xxx_wake_dpc(vha); |
7603 | w = 1; |
7604 | } |
7605 | |
7606 | qla_heart_beat(vha, dpc_started: w); |
7607 | |
7608 | qla2x00_restart_timer(vha, WATCH_INTERVAL); |
7609 | } |
7610 | |
/* Firmware interface routines. */

/* Indices into qla_fw_blobs[], one per ISP family. */
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
#define FW_ISP2322 3
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6
#define FW_ISP82XX 7
#define FW_ISP2031 8
#define FW_ISP8031 9
#define FW_ISP27XX 10
#define FW_ISP28XX 11

/* Firmware image filenames requested via request_firmware(). */
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
#define FW_FILE_ISP2300 "ql2300_fw.bin"
#define FW_FILE_ISP2322 "ql2322_fw.bin"
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
#define FW_FILE_ISP82XX "ql8200_fw.bin"
#define FW_FILE_ISP2031 "ql2600_fw.bin"
#define FW_FILE_ISP8031 "ql8300_fw.bin"
#define FW_FILE_ISP27XX "ql2700_fw.bin"
#define FW_FILE_ISP28XX "ql2800_fw.bin"


/* Serializes loading/releasing of entries in qla_fw_blobs[]. */
static DEFINE_MUTEX(qla_fw_lock);

/*
 * Firmware blob table, indexed by the FW_ISP* constants above; .segs gives
 * RISC load segment addresses for the legacy (pre-FWI2) parts. Terminated
 * by a NULL .name sentinel.
 */
static struct fw_blob qla_fw_blobs[] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
	{ .name = FW_FILE_ISP2031, },
	{ .name = FW_FILE_ISP8031, },
	{ .name = FW_FILE_ISP27XX, },
	{ .name = FW_FILE_ISP28XX, },
	{ .name = NULL, },
};
7657 | |
7658 | struct fw_blob * |
7659 | qla2x00_request_firmware(scsi_qla_host_t *vha) |
7660 | { |
7661 | struct qla_hw_data *ha = vha->hw; |
7662 | struct fw_blob *blob; |
7663 | |
7664 | if (IS_QLA2100(ha)) { |
7665 | blob = &qla_fw_blobs[FW_ISP21XX]; |
7666 | } else if (IS_QLA2200(ha)) { |
7667 | blob = &qla_fw_blobs[FW_ISP22XX]; |
7668 | } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { |
7669 | blob = &qla_fw_blobs[FW_ISP2300]; |
7670 | } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { |
7671 | blob = &qla_fw_blobs[FW_ISP2322]; |
7672 | } else if (IS_QLA24XX_TYPE(ha)) { |
7673 | blob = &qla_fw_blobs[FW_ISP24XX]; |
7674 | } else if (IS_QLA25XX(ha)) { |
7675 | blob = &qla_fw_blobs[FW_ISP25XX]; |
7676 | } else if (IS_QLA81XX(ha)) { |
7677 | blob = &qla_fw_blobs[FW_ISP81XX]; |
7678 | } else if (IS_QLA82XX(ha)) { |
7679 | blob = &qla_fw_blobs[FW_ISP82XX]; |
7680 | } else if (IS_QLA2031(ha)) { |
7681 | blob = &qla_fw_blobs[FW_ISP2031]; |
7682 | } else if (IS_QLA8031(ha)) { |
7683 | blob = &qla_fw_blobs[FW_ISP8031]; |
7684 | } else if (IS_QLA27XX(ha)) { |
7685 | blob = &qla_fw_blobs[FW_ISP27XX]; |
7686 | } else if (IS_QLA28XX(ha)) { |
7687 | blob = &qla_fw_blobs[FW_ISP28XX]; |
7688 | } else { |
7689 | return NULL; |
7690 | } |
7691 | |
7692 | if (!blob->name) |
7693 | return NULL; |
7694 | |
7695 | mutex_lock(&qla_fw_lock); |
7696 | if (blob->fw) |
7697 | goto out; |
7698 | |
7699 | if (request_firmware(fw: &blob->fw, name: blob->name, device: &ha->pdev->dev)) { |
7700 | ql_log(ql_log_warn, vha, 0x0063, |
7701 | fmt: "Failed to load firmware image (%s).\n" , blob->name); |
7702 | blob->fw = NULL; |
7703 | blob = NULL; |
7704 | } |
7705 | |
7706 | out: |
7707 | mutex_unlock(lock: &qla_fw_lock); |
7708 | return blob; |
7709 | } |
7710 | |
7711 | static void |
7712 | qla2x00_release_firmware(void) |
7713 | { |
7714 | struct fw_blob *blob; |
7715 | |
7716 | mutex_lock(&qla_fw_lock); |
7717 | for (blob = qla_fw_blobs; blob->name; blob++) |
7718 | release_firmware(fw: blob->fw); |
7719 | mutex_unlock(lock: &qla_fw_lock); |
7720 | } |
7721 | |
7722 | static void qla_pci_error_cleanup(scsi_qla_host_t *vha) |
7723 | { |
7724 | struct qla_hw_data *ha = vha->hw; |
7725 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev: ha->pdev); |
7726 | struct qla_qpair *qpair = NULL; |
7727 | struct scsi_qla_host *vp, *tvp; |
7728 | fc_port_t *fcport; |
7729 | int i; |
7730 | unsigned long flags; |
7731 | |
7732 | ql_dbg(ql_dbg_aer, vha, 0x9000, |
7733 | fmt: "%s\n" , __func__); |
7734 | ha->chip_reset++; |
7735 | |
7736 | ha->base_qpair->chip_reset = ha->chip_reset; |
7737 | for (i = 0; i < ha->max_qpairs; i++) { |
7738 | if (ha->queue_pair_map[i]) |
7739 | ha->queue_pair_map[i]->chip_reset = |
7740 | ha->base_qpair->chip_reset; |
7741 | } |
7742 | |
7743 | /* |
7744 | * purge mailbox might take a while. Slot Reset/chip reset |
7745 | * will take care of the purge |
7746 | */ |
7747 | |
7748 | mutex_lock(&ha->mq_lock); |
7749 | ha->base_qpair->online = 0; |
7750 | list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) |
7751 | qpair->online = 0; |
7752 | wmb(); |
7753 | mutex_unlock(lock: &ha->mq_lock); |
7754 | |
7755 | qla2x00_mark_all_devices_lost(vha); |
7756 | |
7757 | spin_lock_irqsave(&ha->vport_slock, flags); |
7758 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
7759 | atomic_inc(v: &vp->vref_count); |
7760 | spin_unlock_irqrestore(lock: &ha->vport_slock, flags); |
7761 | qla2x00_mark_all_devices_lost(vha: vp); |
7762 | spin_lock_irqsave(&ha->vport_slock, flags); |
7763 | atomic_dec(v: &vp->vref_count); |
7764 | } |
7765 | spin_unlock_irqrestore(lock: &ha->vport_slock, flags); |
7766 | |
7767 | /* Clear all async request states across all VPs. */ |
7768 | list_for_each_entry(fcport, &vha->vp_fcports, list) |
7769 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); |
7770 | |
7771 | spin_lock_irqsave(&ha->vport_slock, flags); |
7772 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
7773 | atomic_inc(v: &vp->vref_count); |
7774 | spin_unlock_irqrestore(lock: &ha->vport_slock, flags); |
7775 | list_for_each_entry(fcport, &vp->vp_fcports, list) |
7776 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); |
7777 | spin_lock_irqsave(&ha->vport_slock, flags); |
7778 | atomic_dec(v: &vp->vref_count); |
7779 | } |
7780 | spin_unlock_irqrestore(lock: &ha->vport_slock, flags); |
7781 | } |
7782 | |
7783 | |
7784 | static pci_ers_result_t |
7785 | qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
7786 | { |
7787 | scsi_qla_host_t *vha = pci_get_drvdata(pdev); |
7788 | struct qla_hw_data *ha = vha->hw; |
7789 | pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; |
7790 | |
7791 | ql_log(ql_log_warn, vha, 0x9000, |
7792 | fmt: "PCI error detected, state %x.\n" , state); |
7793 | ha->pci_error_state = QLA_PCI_ERR_DETECTED; |
7794 | |
7795 | if (!atomic_read(v: &pdev->enable_cnt)) { |
7796 | ql_log(ql_log_info, vha, 0xffff, |
7797 | fmt: "PCI device is disabled,state %x\n" , state); |
7798 | ret = PCI_ERS_RESULT_NEED_RESET; |
7799 | goto out; |
7800 | } |
7801 | |
7802 | switch (state) { |
7803 | case pci_channel_io_normal: |
7804 | qla_pci_set_eeh_busy(vha); |
7805 | if (ql2xmqsupport || ql2xnvmeenable) { |
7806 | set_bit(QPAIR_ONLINE_CHECK_NEEDED, addr: &vha->dpc_flags); |
7807 | qla2xxx_wake_dpc(vha); |
7808 | } |
7809 | ret = PCI_ERS_RESULT_CAN_RECOVER; |
7810 | break; |
7811 | case pci_channel_io_frozen: |
7812 | qla_pci_set_eeh_busy(vha); |
7813 | ret = PCI_ERS_RESULT_NEED_RESET; |
7814 | break; |
7815 | case pci_channel_io_perm_failure: |
7816 | ha->flags.pci_channel_io_perm_failure = 1; |
7817 | qla2x00_abort_all_cmds(vha, res: DID_NO_CONNECT << 16); |
7818 | if (ql2xmqsupport || ql2xnvmeenable) { |
7819 | set_bit(QPAIR_ONLINE_CHECK_NEEDED, addr: &vha->dpc_flags); |
7820 | qla2xxx_wake_dpc(vha); |
7821 | } |
7822 | ret = PCI_ERS_RESULT_DISCONNECT; |
7823 | } |
7824 | out: |
7825 | ql_dbg(ql_dbg_aer, vha, 0x600d, |
7826 | fmt: "PCI error detected returning [%x].\n" , ret); |
7827 | return ret; |
7828 | } |
7829 | |
7830 | static pci_ers_result_t |
7831 | qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) |
7832 | { |
7833 | int risc_paused = 0; |
7834 | uint32_t stat; |
7835 | unsigned long flags; |
7836 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
7837 | struct qla_hw_data *ha = base_vha->hw; |
7838 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
7839 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; |
7840 | |
7841 | ql_log(ql_log_warn, vha: base_vha, 0x9000, |
7842 | fmt: "mmio enabled\n" ); |
7843 | |
7844 | ha->pci_error_state = QLA_PCI_MMIO_ENABLED; |
7845 | |
7846 | if (IS_QLA82XX(ha)) |
7847 | return PCI_ERS_RESULT_RECOVERED; |
7848 | |
7849 | if (qla2x00_isp_reg_stat(ha)) { |
7850 | ql_log(ql_log_info, vha: base_vha, 0x803f, |
7851 | fmt: "During mmio enabled, PCI/Register disconnect still detected.\n" ); |
7852 | goto out; |
7853 | } |
7854 | |
7855 | spin_lock_irqsave(&ha->hardware_lock, flags); |
7856 | if (IS_QLA2100(ha) || IS_QLA2200(ha)){ |
7857 | stat = rd_reg_word(addr: ®->hccr); |
7858 | if (stat & HCCR_RISC_PAUSE) |
7859 | risc_paused = 1; |
7860 | } else if (IS_QLA23XX(ha)) { |
7861 | stat = rd_reg_dword(addr: ®->u.isp2300.host_status); |
7862 | if (stat & HSR_RISC_PAUSED) |
7863 | risc_paused = 1; |
7864 | } else if (IS_FWI2_CAPABLE(ha)) { |
7865 | stat = rd_reg_dword(addr: ®24->host_status); |
7866 | if (stat & HSRX_RISC_PAUSED) |
7867 | risc_paused = 1; |
7868 | } |
7869 | spin_unlock_irqrestore(lock: &ha->hardware_lock, flags); |
7870 | |
7871 | if (risc_paused) { |
7872 | ql_log(ql_log_info, vha: base_vha, 0x9003, |
7873 | fmt: "RISC paused -- mmio_enabled, Dumping firmware.\n" ); |
7874 | qla2xxx_dump_fw(vha: base_vha); |
7875 | } |
7876 | out: |
7877 | /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ |
7878 | ql_dbg(ql_dbg_aer, vha: base_vha, 0x600d, |
7879 | fmt: "mmio enabled returning.\n" ); |
7880 | return PCI_ERS_RESULT_NEED_RESET; |
7881 | } |
7882 | |
7883 | static pci_ers_result_t |
7884 | qla2xxx_pci_slot_reset(struct pci_dev *pdev) |
7885 | { |
7886 | pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; |
7887 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
7888 | struct qla_hw_data *ha = base_vha->hw; |
7889 | int rc; |
7890 | struct qla_qpair *qpair = NULL; |
7891 | |
7892 | ql_log(ql_log_warn, vha: base_vha, 0x9004, |
7893 | fmt: "Slot Reset.\n" ); |
7894 | |
7895 | ha->pci_error_state = QLA_PCI_SLOT_RESET; |
7896 | /* Workaround: qla2xxx driver which access hardware earlier |
7897 | * needs error state to be pci_channel_io_online. |
7898 | * Otherwise mailbox command timesout. |
7899 | */ |
7900 | pdev->error_state = pci_channel_io_normal; |
7901 | |
7902 | pci_restore_state(dev: pdev); |
7903 | |
7904 | /* pci_restore_state() clears the saved_state flag of the device |
7905 | * save restored state which resets saved_state flag |
7906 | */ |
7907 | pci_save_state(dev: pdev); |
7908 | |
7909 | if (ha->mem_only) |
7910 | rc = pci_enable_device_mem(dev: pdev); |
7911 | else |
7912 | rc = pci_enable_device(dev: pdev); |
7913 | |
7914 | if (rc) { |
7915 | ql_log(ql_log_warn, vha: base_vha, 0x9005, |
7916 | fmt: "Can't re-enable PCI device after reset.\n" ); |
7917 | goto exit_slot_reset; |
7918 | } |
7919 | |
7920 | |
7921 | if (ha->isp_ops->pci_config(base_vha)) |
7922 | goto exit_slot_reset; |
7923 | |
7924 | mutex_lock(&ha->mq_lock); |
7925 | list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) |
7926 | qpair->online = 1; |
7927 | mutex_unlock(lock: &ha->mq_lock); |
7928 | |
7929 | ha->flags.eeh_busy = 0; |
7930 | base_vha->flags.online = 1; |
7931 | set_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags); |
7932 | ha->isp_ops->abort_isp(base_vha); |
7933 | clear_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags); |
7934 | |
7935 | if (qla2x00_isp_reg_stat(ha)) { |
7936 | ha->flags.eeh_busy = 1; |
7937 | qla_pci_error_cleanup(vha: base_vha); |
7938 | ql_log(ql_log_warn, vha: base_vha, 0x9005, |
7939 | fmt: "Device unable to recover from PCI error.\n" ); |
7940 | } else { |
7941 | ret = PCI_ERS_RESULT_RECOVERED; |
7942 | } |
7943 | |
7944 | exit_slot_reset: |
7945 | ql_dbg(ql_dbg_aer, vha: base_vha, 0x900e, |
7946 | fmt: "Slot Reset returning %x.\n" , ret); |
7947 | |
7948 | return ret; |
7949 | } |
7950 | |
7951 | static void |
7952 | qla2xxx_pci_resume(struct pci_dev *pdev) |
7953 | { |
7954 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
7955 | struct qla_hw_data *ha = base_vha->hw; |
7956 | int ret; |
7957 | |
7958 | ql_log(ql_log_warn, vha: base_vha, 0x900f, |
7959 | fmt: "Pci Resume.\n" ); |
7960 | |
7961 | |
7962 | ret = qla2x00_wait_for_hba_online(vha: base_vha); |
7963 | if (ret != QLA_SUCCESS) { |
7964 | ql_log(ql_log_fatal, vha: base_vha, 0x9002, |
7965 | fmt: "The device failed to resume I/O from slot/link_reset.\n" ); |
7966 | } |
7967 | ha->pci_error_state = QLA_PCI_RESUME; |
7968 | ql_dbg(ql_dbg_aer, vha: base_vha, 0x600d, |
7969 | fmt: "Pci Resume returning.\n" ); |
7970 | } |
7971 | |
7972 | void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) |
7973 | { |
7974 | struct qla_hw_data *ha = vha->hw; |
7975 | struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev); |
7976 | bool do_cleanup = false; |
7977 | unsigned long flags; |
7978 | |
7979 | if (ha->flags.eeh_busy) |
7980 | return; |
7981 | |
7982 | spin_lock_irqsave(&base_vha->work_lock, flags); |
7983 | if (!ha->flags.eeh_busy) { |
7984 | ha->eeh_jif = jiffies; |
7985 | ha->flags.eeh_flush = 0; |
7986 | |
7987 | ha->flags.eeh_busy = 1; |
7988 | do_cleanup = true; |
7989 | } |
7990 | spin_unlock_irqrestore(lock: &base_vha->work_lock, flags); |
7991 | |
7992 | if (do_cleanup) |
7993 | qla_pci_error_cleanup(vha: base_vha); |
7994 | } |
7995 | |
7996 | /* |
7997 | * this routine will schedule a task to pause IO from interrupt context |
7998 | * if caller sees a PCIE error event (register read = 0xf's) |
7999 | */ |
8000 | void qla_schedule_eeh_work(struct scsi_qla_host *vha) |
8001 | { |
8002 | struct qla_hw_data *ha = vha->hw; |
8003 | struct scsi_qla_host *base_vha = pci_get_drvdata(pdev: ha->pdev); |
8004 | |
8005 | if (ha->flags.eeh_busy) |
8006 | return; |
8007 | |
8008 | set_bit(DO_EEH_RECOVERY, addr: &base_vha->dpc_flags); |
8009 | qla2xxx_wake_dpc(vha: base_vha); |
8010 | } |
8011 | |
8012 | static void |
8013 | qla_pci_reset_prepare(struct pci_dev *pdev) |
8014 | { |
8015 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
8016 | struct qla_hw_data *ha = base_vha->hw; |
8017 | struct qla_qpair *qpair; |
8018 | |
8019 | ql_log(ql_log_warn, vha: base_vha, 0xffff, |
8020 | fmt: "%s.\n" , __func__); |
8021 | |
8022 | /* |
8023 | * PCI FLR/function reset is about to reset the |
8024 | * slot. Stop the chip to stop all DMA access. |
8025 | * It is assumed that pci_reset_done will be called |
8026 | * after FLR to resume Chip operation. |
8027 | */ |
8028 | ha->flags.eeh_busy = 1; |
8029 | mutex_lock(&ha->mq_lock); |
8030 | list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) |
8031 | qpair->online = 0; |
8032 | mutex_unlock(lock: &ha->mq_lock); |
8033 | |
8034 | set_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags); |
8035 | qla2x00_abort_isp_cleanup(base_vha); |
8036 | qla2x00_abort_all_cmds(vha: base_vha, res: DID_RESET << 16); |
8037 | } |
8038 | |
8039 | static void |
8040 | qla_pci_reset_done(struct pci_dev *pdev) |
8041 | { |
8042 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); |
8043 | struct qla_hw_data *ha = base_vha->hw; |
8044 | struct qla_qpair *qpair; |
8045 | |
8046 | ql_log(ql_log_warn, vha: base_vha, 0xffff, |
8047 | fmt: "%s.\n" , __func__); |
8048 | |
8049 | /* |
8050 | * FLR just completed by PCI layer. Resume adapter |
8051 | */ |
8052 | ha->flags.eeh_busy = 0; |
8053 | mutex_lock(&ha->mq_lock); |
8054 | list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) |
8055 | qpair->online = 1; |
8056 | mutex_unlock(lock: &ha->mq_lock); |
8057 | |
8058 | base_vha->flags.online = 1; |
8059 | ha->isp_ops->abort_isp(base_vha); |
8060 | clear_bit(ABORT_ISP_ACTIVE, addr: &base_vha->dpc_flags); |
8061 | } |
8062 | |
8063 | static void qla2xxx_map_queues(struct Scsi_Host *shost) |
8064 | { |
8065 | scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; |
8066 | struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; |
8067 | |
8068 | if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) |
8069 | blk_mq_map_queues(qmap); |
8070 | else |
8071 | blk_mq_pci_map_queues(qmap, pdev: vha->hw->pdev, offset: vha->irq_offset); |
8072 | } |
8073 | |
/*
 * SCSI midlayer host template: entry points and per-host limits the
 * qla2xxx initiator exposes to the SCSI core.
 */
struct scsi_host_template qla2xxx_driver_template = {
	.module = THIS_MODULE,
	.name = QLA2XXX_DRIVER_NAME,
	.queuecommand = qla2xxx_queuecommand,

	/* Error-handling escalation: abort -> LUN -> target -> bus -> host. */
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_should_retry_cmd = fc_eh_should_retry_cmd,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	/* Device lifetime and bus-scan hooks. */
	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.scan_finished = qla2xxx_scan_finished,
	.scan_start = qla2xxx_scan_start,
	.change_queue_depth = scsi_change_queue_depth,
	.map_queues = qla2xxx_map_queues,
	.this_id = -1,
	.cmd_per_lun = 3,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_groups = qla2x00_host_groups,

	.supported_mode = MODE_INITIATOR,
	.track_queue_depth = 1,
	/* Per-command driver payload: one srb_t embedded in each scsi_cmnd. */
	.cmd_size = sizeof(srb_t),
};
8106 | |
/*
 * PCI/AER error recovery callbacks.  error_detected/mmio_enabled/
 * slot_reset/resume implement the standard recovery state machine;
 * reset_prepare/reset_done bracket FLR-style function resets.
 */
static const struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
	.reset_prepare = qla_pci_reset_prepare,
	.reset_done = qla_pci_reset_done,
};
8115 | |
8116 | static struct pci_device_id qla2xxx_pci_tbl[] = { |
8117 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, |
8118 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, |
8119 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, |
8120 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, |
8121 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, |
8122 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, |
8123 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, |
8124 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, |
8125 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, |
8126 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, |
8127 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, |
8128 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, |
8129 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, |
8130 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, |
8131 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, |
8132 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, |
8133 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, |
8134 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, |
8135 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, |
8136 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, |
8137 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, |
8138 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, |
8139 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, |
8140 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, |
8141 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, |
8142 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, |
8143 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, |
8144 | { 0 }, |
8145 | }; |
8146 | MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); |
8147 | |
/* PCI driver descriptor registered/unregistered by module init/exit. */
static struct pci_driver qla2xxx_pci_driver = {
	.name = QLA2XXX_DRIVER_NAME,
	.driver = {
		.owner = THIS_MODULE,
	},
	.id_table = qla2xxx_pci_tbl,
	.probe = qla2x00_probe_one,		/* per-HBA bring-up */
	.remove = qla2x00_remove_one,		/* per-HBA teardown */
	.shutdown = qla2x00_shutdown,		/* quiesce on reboot/kexec */
	.err_handler = &qla2xxx_err_handler,	/* AER/FLR recovery hooks */
};
8159 | |
/*
 * File operations for the legacy ioctl char device registered in
 * qla2x00_module_init(); it implements no methods beyond a no-op llseek.
 */
static const struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
8164 | |
8165 | /** |
8166 | * qla2x00_module_init - Module initialization. |
8167 | **/ |
8168 | static int __init |
8169 | qla2x00_module_init(void) |
8170 | { |
8171 | int ret = 0; |
8172 | |
8173 | BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); |
8174 | BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); |
8175 | BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); |
8176 | BUILD_BUG_ON(sizeof(cont_entry_t) != 64); |
8177 | BUILD_BUG_ON(sizeof(init_cb_t) != 96); |
8178 | BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); |
8179 | BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); |
8180 | BUILD_BUG_ON(sizeof(request_t) != 64); |
8181 | BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); |
8182 | BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); |
8183 | BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); |
8184 | BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); |
8185 | BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); |
8186 | BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); |
8187 | BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); |
8188 | BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); |
8189 | BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); |
8190 | BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); |
8191 | BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); |
8192 | BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); |
8193 | BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604); |
8194 | BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); |
8195 | BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); |
8196 | BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); |
8197 | BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); |
8198 | BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); |
8199 | BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); |
8200 | BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); |
8201 | BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); |
8202 | BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); |
8203 | BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); |
8204 | BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); |
8205 | BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); |
8206 | BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); |
8207 | BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); |
8208 | BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); |
8209 | BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); |
8210 | BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); |
8211 | BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); |
8212 | BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); |
8213 | BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); |
8214 | BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); |
8215 | BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); |
8216 | BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); |
8217 | BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); |
8218 | BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); |
8219 | BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); |
8220 | BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); |
8221 | BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); |
8222 | BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); |
8223 | BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); |
8224 | BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); |
8225 | BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); |
8226 | BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); |
8227 | BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); |
8228 | BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); |
8229 | BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); |
8230 | BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); |
8231 | BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); |
8232 | BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); |
8233 | BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); |
8234 | BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); |
8235 | BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); |
8236 | BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); |
8237 | BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); |
8238 | BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); |
8239 | BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); |
8240 | BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); |
8241 | BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); |
8242 | BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); |
8243 | BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); |
8244 | BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); |
8245 | BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); |
8246 | BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); |
8247 | BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); |
8248 | BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); |
8249 | BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); |
8250 | BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); |
8251 | BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); |
8252 | BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); |
8253 | BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); |
8254 | BUILD_BUG_ON(sizeof(sts_entry_t) != 64); |
8255 | BUILD_BUG_ON(sizeof(sw_info_t) != 32); |
8256 | BUILD_BUG_ON(sizeof(target_id_t) != 2); |
8257 | |
8258 | qla_trace_init(); |
8259 | |
8260 | /* Allocate cache for SRBs. */ |
8261 | srb_cachep = kmem_cache_create(name: "qla2xxx_srbs" , size: sizeof(srb_t), align: 0, |
8262 | SLAB_HWCACHE_ALIGN, NULL); |
8263 | if (srb_cachep == NULL) { |
8264 | ql_log(ql_log_fatal, NULL, 0x0001, |
8265 | fmt: "Unable to allocate SRB cache...Failing load!.\n" ); |
8266 | return -ENOMEM; |
8267 | } |
8268 | |
8269 | /* Initialize target kmem_cache and mem_pools */ |
8270 | ret = qlt_init(); |
8271 | if (ret < 0) { |
8272 | goto destroy_cache; |
8273 | } else if (ret > 0) { |
8274 | /* |
8275 | * If initiator mode is explictly disabled by qlt_init(), |
8276 | * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from |
8277 | * performing scsi_scan_target() during LOOP UP event. |
8278 | */ |
8279 | qla2xxx_transport_functions.disable_target_scan = 1; |
8280 | qla2xxx_transport_vport_functions.disable_target_scan = 1; |
8281 | } |
8282 | |
8283 | /* Derive version string. */ |
8284 | strcpy(p: qla2x00_version_str, QLA2XXX_VERSION); |
8285 | if (ql2xextended_error_logging) |
8286 | strcat(p: qla2x00_version_str, q: "-debug" ); |
8287 | if (ql2xextended_error_logging == 1) |
8288 | ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; |
8289 | |
8290 | qla2xxx_transport_template = |
8291 | fc_attach_transport(&qla2xxx_transport_functions); |
8292 | if (!qla2xxx_transport_template) { |
8293 | ql_log(ql_log_fatal, NULL, 0x0002, |
8294 | fmt: "fc_attach_transport failed...Failing load!.\n" ); |
8295 | ret = -ENODEV; |
8296 | goto qlt_exit; |
8297 | } |
8298 | |
8299 | apidev_major = register_chrdev(major: 0, QLA2XXX_APIDEV, fops: &apidev_fops); |
8300 | if (apidev_major < 0) { |
8301 | ql_log(ql_log_fatal, NULL, 0x0003, |
8302 | fmt: "Unable to register char device %s.\n" , QLA2XXX_APIDEV); |
8303 | } |
8304 | |
8305 | qla2xxx_transport_vport_template = |
8306 | fc_attach_transport(&qla2xxx_transport_vport_functions); |
8307 | if (!qla2xxx_transport_vport_template) { |
8308 | ql_log(ql_log_fatal, NULL, 0x0004, |
8309 | fmt: "fc_attach_transport vport failed...Failing load!.\n" ); |
8310 | ret = -ENODEV; |
8311 | goto unreg_chrdev; |
8312 | } |
8313 | ql_log(ql_log_info, NULL, 0x0005, |
8314 | fmt: "QLogic Fibre Channel HBA Driver: %s.\n" , |
8315 | qla2x00_version_str); |
8316 | ret = pci_register_driver(&qla2xxx_pci_driver); |
8317 | if (ret) { |
8318 | ql_log(ql_log_fatal, NULL, 0x0006, |
8319 | fmt: "pci_register_driver failed...ret=%d Failing load!.\n" , |
8320 | ret); |
8321 | goto release_vport_transport; |
8322 | } |
8323 | return ret; |
8324 | |
8325 | release_vport_transport: |
8326 | fc_release_transport(qla2xxx_transport_vport_template); |
8327 | |
8328 | unreg_chrdev: |
8329 | if (apidev_major >= 0) |
8330 | unregister_chrdev(major: apidev_major, QLA2XXX_APIDEV); |
8331 | fc_release_transport(qla2xxx_transport_template); |
8332 | |
8333 | qlt_exit: |
8334 | qlt_exit(); |
8335 | |
8336 | destroy_cache: |
8337 | kmem_cache_destroy(s: srb_cachep); |
8338 | |
8339 | qla_trace_uninit(); |
8340 | return ret; |
8341 | } |
8342 | |
8343 | /** |
8344 | * qla2x00_module_exit - Module cleanup. |
8345 | **/ |
8346 | static void __exit |
8347 | qla2x00_module_exit(void) |
8348 | { |
8349 | pci_unregister_driver(dev: &qla2xxx_pci_driver); |
8350 | qla2x00_release_firmware(); |
8351 | kmem_cache_destroy(s: ctx_cachep); |
8352 | fc_release_transport(qla2xxx_transport_vport_template); |
8353 | if (apidev_major >= 0) |
8354 | unregister_chrdev(major: apidev_major, QLA2XXX_APIDEV); |
8355 | fc_release_transport(qla2xxx_transport_template); |
8356 | qlt_exit(); |
8357 | kmem_cache_destroy(s: srb_cachep); |
8358 | qla_trace_uninit(); |
8359 | } |
8360 | |
module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation" );
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver" );
MODULE_LICENSE("GPL" );
/* Firmware images request_firmware() may load for the older ISP parts. */
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
8373 | |