1/*
2 * linux/drivers/message/fusion/mptbase.c
3 * This is the Fusion MPT base driver which supports multiple
4 * (SCSI + LAN) specialized protocol drivers.
5 * For use with LSI PCI chip/adapter(s)
6 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 *
11 */
12/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
13/*
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; version 2 of the License.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 NO WARRANTY
24 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
25 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
26 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
27 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
28 solely responsible for determining the appropriateness of using and
29 distributing the Program and assumes all risks associated with its
30 exercise of rights under this Agreement, including but not limited to
31 the risks and costs of program errors, damage to or loss of data,
32 programs or equipment, and unavailability or interruption of operations.
33
34 DISCLAIMER OF LIABILITY
35 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
36 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
38 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
39 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
40 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
41 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
42
43 You should have received a copy of the GNU General Public License
44 along with this program; if not, write to the Free Software
45 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46*/
47/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48
49#include <linux/kernel.h>
50#include <linux/module.h>
51#include <linux/errno.h>
52#include <linux/init.h>
53#include <linux/seq_file.h>
54#include <linux/slab.h>
55#include <linux/types.h>
56#include <linux/pci.h>
57#include <linux/kdev_t.h>
58#include <linux/blkdev.h>
59#include <linux/delay.h>
60#include <linux/interrupt.h> /* needed for in_interrupt() proto */
61#include <linux/dma-mapping.h>
62#include <linux/kthread.h>
63#include <scsi/scsi_host.h>
64
65#include "mptbase.h"
66#include "lsi/mpi_log_fc.h"
67
68/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69#define my_NAME "Fusion MPT base driver"
70#define my_VERSION MPT_LINUX_VERSION_COMMON
71#define MYNAM "mptbase"
72
73MODULE_AUTHOR(MODULEAUTHOR);
74MODULE_DESCRIPTION(my_NAME);
75MODULE_LICENSE("GPL");
76MODULE_VERSION(my_VERSION);
77
78/*
79 * cmd line parameters
80 */
81
82static int mpt_msi_enable_spi;
83module_param(mpt_msi_enable_spi, int, 0);
84MODULE_PARM_DESC(mpt_msi_enable_spi,
85 " Enable MSI Support for SPI controllers (default=0)");
86
87static int mpt_msi_enable_fc;
88module_param(mpt_msi_enable_fc, int, 0);
89MODULE_PARM_DESC(mpt_msi_enable_fc,
90 " Enable MSI Support for FC controllers (default=0)");
91
92static int mpt_msi_enable_sas;
93module_param(mpt_msi_enable_sas, int, 0);
94MODULE_PARM_DESC(mpt_msi_enable_sas,
95 " Enable MSI Support for SAS controllers (default=0)");
96
97static int mpt_channel_mapping;
98module_param(mpt_channel_mapping, int, 0);
99MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
100
101static int mpt_debug_level;
102static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
103module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
104 &mpt_debug_level, 0600);
105MODULE_PARM_DESC(mpt_debug_level,
106 " debug level - refer to mptdebug.h - (default=0)");
107
108int mpt_fwfault_debug;
109EXPORT_SYMBOL(mpt_fwfault_debug);
110module_param(mpt_fwfault_debug, int, 0600);
111MODULE_PARM_DESC(mpt_fwfault_debug,
112 "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
113
114static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
115 [MPT_MAX_CALLBACKNAME_LEN+1];
116
117#ifdef MFCNT
118static int mfcounter = 0;
119#define PRINT_MF_COUNT 20000
120#endif
121
122/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
123/*
124 * Public data...
125 */
126
127#define WHOINIT_UNKNOWN 0xAA
128
129/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
130/*
131 * Private data...
132 */
133 /* Adapter link list */
134LIST_HEAD(ioc_list);
135 /* Callback lookup table */
136static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
137 /* Protocol driver class lookup table */
138static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
139 /* Event handler lookup table */
140static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
141 /* Reset handler lookup table */
142static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
143static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
144
145#ifdef CONFIG_PROC_FS
146static struct proc_dir_entry *mpt_proc_root_dir;
147#endif
148
149/*
150 * Driver Callback Index's
151 */
152static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
153static u8 last_drv_idx;
154
155/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
156/*
157 * Forward protos...
158 */
159static irqreturn_t mpt_interrupt(int irq, void *bus_id);
160static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
161 MPT_FRAME_HDR *reply);
162static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
163 u32 *req, int replyBytes, u16 *u16reply, int maxwait,
164 int sleepFlag);
165static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
166static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
167static void mpt_adapter_disable(MPT_ADAPTER *ioc);
168static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
169
170static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
171static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
172static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
173static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
174static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
175static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
176static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
177static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
178static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
179static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
180static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
181static int PrimeIocFifos(MPT_ADAPTER *ioc);
182static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
183static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
184static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
185static int GetLanConfigPages(MPT_ADAPTER *ioc);
186static int GetIoUnitPage2(MPT_ADAPTER *ioc);
187int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
188static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
189static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
190static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
191static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
192static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
193static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
194 int sleepFlag);
195static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
196static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
197static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
198
199#ifdef CONFIG_PROC_FS
200static int mpt_summary_proc_show(struct seq_file *m, void *v);
201static int mpt_version_proc_show(struct seq_file *m, void *v);
202static int mpt_iocinfo_proc_show(struct seq_file *m, void *v);
203#endif
204static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
205
206static int ProcessEventNotification(MPT_ADAPTER *ioc,
207 EventNotificationReply_t *evReply, int *evHandlers);
208static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
209static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
210static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
211static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
212static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
213static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
214
215/* module entry point */
216static int __init fusion_init (void);
217static void __exit fusion_exit (void);
218
219#define CHIPREG_READ32(addr) readl_relaxed(addr)
220#define CHIPREG_READ32_dmasync(addr) readl(addr)
221#define CHIPREG_WRITE32(addr,val) writel(val, addr)
222#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
223#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
224
225static void
226pci_disable_io_access(struct pci_dev *pdev)
227{
228 u16 command_reg;
229
230 pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
231 command_reg &= ~1;
232 pci_write_config_word(pdev, PCI_COMMAND, command_reg);
233}
234
235static void
236pci_enable_io_access(struct pci_dev *pdev)
237{
238 u16 command_reg;
239
240 pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
241 command_reg |= 1;
242 pci_write_config_word(pdev, PCI_COMMAND, command_reg);
243}
244
245static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
246{
247 int ret = param_set_int(val, kp);
248 MPT_ADAPTER *ioc;
249
250 if (ret)
251 return ret;
252
253 list_for_each_entry(ioc, &ioc_list, list)
254 ioc->debug_level = mpt_debug_level;
255 return 0;
256}
257
258/**
259 * mpt_get_cb_idx - obtain cb_idx for registered driver
260 * @dclass: class driver enum
261 *
262 * Returns cb_idx, or zero means it wasn't found
263 **/
264static u8
265mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
266{
267 u8 cb_idx;
268
269 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
270 if (MptDriverClass[cb_idx] == dclass)
271 return cb_idx;
272 return 0;
273}
274
275/**
276 * mpt_is_discovery_complete - determine if discovery has completed
277 * @ioc: per adatper instance
278 *
279 * Returns 1 when discovery completed, else zero.
280 */
281static int
282mpt_is_discovery_complete(MPT_ADAPTER *ioc)
283{
284 ConfigExtendedPageHeader_t hdr;
285 CONFIGPARMS cfg;
286 SasIOUnitPage0_t *buffer;
287 dma_addr_t dma_handle;
288 int rc = 0;
289
290 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
291 memset(&cfg, 0, sizeof(CONFIGPARMS));
292 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
293 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
294 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
295 cfg.cfghdr.ehdr = &hdr;
296 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
297
298 if ((mpt_config(ioc, &cfg)))
299 goto out;
300 if (!hdr.ExtPageLength)
301 goto out;
302
303 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
304 &dma_handle);
305 if (!buffer)
306 goto out;
307
308 cfg.physAddr = dma_handle;
309 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
310
311 if ((mpt_config(ioc, &cfg)))
312 goto out_free_consistent;
313
314 if (!(buffer->PhyData[0].PortFlags &
315 MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
316 rc = 1;
317
318 out_free_consistent:
319 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
320 buffer, dma_handle);
321 out:
322 return rc;
323}
324
325
326/**
327 * mpt_remove_dead_ioc_func - kthread context to remove dead ioc
328 * @arg: input argument, used to derive ioc
329 *
330 * Return 0 if controller is removed from pci subsystem.
331 * Return -1 for other case.
332 */
333static int mpt_remove_dead_ioc_func(void *arg)
334{
335 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
336 struct pci_dev *pdev;
337
338 if (!ioc)
339 return -1;
340
341 pdev = ioc->pcidev;
342 if (!pdev)
343 return -1;
344
345 pci_stop_and_remove_bus_device_locked(pdev);
346 return 0;
347}
348
349
350
351/**
352 * mpt_fault_reset_work - work performed on workq after ioc fault
353 * @work: input argument, used to derive ioc
354 *
355**/
356static void
357mpt_fault_reset_work(struct work_struct *work)
358{
359 MPT_ADAPTER *ioc =
360 container_of(work, MPT_ADAPTER, fault_reset_work.work);
361 u32 ioc_raw_state;
362 int rc;
363 unsigned long flags;
364 MPT_SCSI_HOST *hd;
365 struct task_struct *p;
366
367 if (ioc->ioc_reset_in_progress || !ioc->active)
368 goto out;
369
370
371 ioc_raw_state = mpt_GetIocState(ioc, 0);
372 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
373 printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
374 ioc->name, __func__);
375
376 /*
377 * Call mptscsih_flush_pending_cmds callback so that we
378 * flush all pending commands back to OS.
379 * This call is required to aovid deadlock at block layer.
380 * Dead IOC will fail to do diag reset,and this call is safe
381 * since dead ioc will never return any command back from HW.
382 */
383 hd = shost_priv(ioc->sh);
384 ioc->schedule_dead_ioc_flush_running_cmds(hd);
385
386 /*Remove the Dead Host */
387 p = kthread_run(mpt_remove_dead_ioc_func, ioc,
388 "mpt_dead_ioc_%d", ioc->id);
389 if (IS_ERR(p)) {
390 printk(MYIOC_s_ERR_FMT
391 "%s: Running mpt_dead_ioc thread failed !\n",
392 ioc->name, __func__);
393 } else {
394 printk(MYIOC_s_WARN_FMT
395 "%s: Running mpt_dead_ioc thread success !\n",
396 ioc->name, __func__);
397 }
398 return; /* don't rearm timer */
399 }
400
401 if ((ioc_raw_state & MPI_IOC_STATE_MASK)
402 == MPI_IOC_STATE_FAULT) {
403 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
404 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
405 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
406 ioc->name, __func__);
407 rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
408 printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
409 __func__, (rc == 0) ? "success" : "failed");
410 ioc_raw_state = mpt_GetIocState(ioc, 0);
411 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
412 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
413 "reset (%04xh)\n", ioc->name, ioc_raw_state &
414 MPI_DOORBELL_DATA_MASK);
415 } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
416 if ((mpt_is_discovery_complete(ioc))) {
417 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
418 "discovery_quiesce_io flag\n", ioc->name));
419 ioc->sas_discovery_quiesce_io = 0;
420 }
421 }
422
423 out:
424 /*
425 * Take turns polling alternate controller
426 */
427 if (ioc->alt_ioc)
428 ioc = ioc->alt_ioc;
429
430 /* rearm the timer */
431 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
432 if (ioc->reset_work_q)
433 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
434 msecs_to_jiffies(MPT_POLLING_INTERVAL));
435 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
436}
437
438
439/*
440 * Process turbo (context) reply...
441 */
442static void
443mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
444{
445 MPT_FRAME_HDR *mf = NULL;
446 MPT_FRAME_HDR *mr = NULL;
447 u16 req_idx = 0;
448 u8 cb_idx;
449
450 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
451 ioc->name, pa));
452
453 switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
454 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
455 req_idx = pa & 0x0000FFFF;
456 cb_idx = (pa & 0x00FF0000) >> 16;
457 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
458 break;
459 case MPI_CONTEXT_REPLY_TYPE_LAN:
460 cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
461 /*
462 * Blind set of mf to NULL here was fatal
463 * after lan_reply says "freeme"
464 * Fix sort of combined with an optimization here;
465 * added explicit check for case where lan_reply
466 * was just returning 1 and doing nothing else.
467 * For this case skip the callback, but set up
468 * proper mf value first here:-)
469 */
470 if ((pa & 0x58000000) == 0x58000000) {
471 req_idx = pa & 0x0000FFFF;
472 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
473 mpt_free_msg_frame(ioc, mf);
474 mb();
475 return;
476 break;
477 }
478 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
479 break;
480 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
481 cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
482 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
483 break;
484 default:
485 cb_idx = 0;
486 BUG();
487 }
488
489 /* Check for (valid) IO callback! */
490 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
491 MptCallbacks[cb_idx] == NULL) {
492 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
493 __func__, ioc->name, cb_idx);
494 goto out;
495 }
496
497 if (MptCallbacks[cb_idx](ioc, mf, mr))
498 mpt_free_msg_frame(ioc, mf);
499 out:
500 mb();
501}
502
503static void
504mpt_reply(MPT_ADAPTER *ioc, u32 pa)
505{
506 MPT_FRAME_HDR *mf;
507 MPT_FRAME_HDR *mr;
508 u16 req_idx;
509 u8 cb_idx;
510 int freeme;
511
512 u32 reply_dma_low;
513 u16 ioc_stat;
514
515 /* non-TURBO reply! Hmmm, something may be up...
516 * Newest turbo reply mechanism; get address
517 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
518 */
519
520 /* Map DMA address of reply header to cpu address.
521 * pa is 32 bits - but the dma address may be 32 or 64 bits
522 * get offset based only only the low addresses
523 */
524
525 reply_dma_low = (pa <<= 1);
526 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
527 (reply_dma_low - ioc->reply_frames_low_dma));
528
529 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
530 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
531 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
532
533 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
534 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
535 DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);
536
537 /* Check/log IOC log info
538 */
539 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
540 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
541 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
542 if (ioc->bus_type == FC)
543 mpt_fc_log_info(ioc, log_info);
544 else if (ioc->bus_type == SPI)
545 mpt_spi_log_info(ioc, log_info);
546 else if (ioc->bus_type == SAS)
547 mpt_sas_log_info(ioc, log_info, cb_idx);
548 }
549
550 if (ioc_stat & MPI_IOCSTATUS_MASK)
551 mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);
552
553 /* Check for (valid) IO callback! */
554 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
555 MptCallbacks[cb_idx] == NULL) {
556 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
557 __func__, ioc->name, cb_idx);
558 freeme = 0;
559 goto out;
560 }
561
562 freeme = MptCallbacks[cb_idx](ioc, mf, mr);
563
564 out:
565 /* Flush (non-TURBO) reply with a WRITE! */
566 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
567
568 if (freeme)
569 mpt_free_msg_frame(ioc, mf);
570 mb();
571}
572
573/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
574/**
575 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
576 * @irq: irq number (not used)
577 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
578 *
579 * This routine is registered via the request_irq() kernel API call,
580 * and handles all interrupts generated from a specific MPT adapter
581 * (also referred to as a IO Controller or IOC).
582 * This routine must clear the interrupt from the adapter and does
583 * so by reading the reply FIFO. Multiple replies may be processed
584 * per single call to this routine.
585 *
586 * This routine handles register-level access of the adapter but
587 * dispatches (calls) a protocol-specific callback routine to handle
588 * the protocol-specific details of the MPT request completion.
589 */
590static irqreturn_t
591mpt_interrupt(int irq, void *bus_id)
592{
593 MPT_ADAPTER *ioc = bus_id;
594 u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
595
596 if (pa == 0xFFFFFFFF)
597 return IRQ_NONE;
598
599 /*
600 * Drain the reply FIFO!
601 */
602 do {
603 if (pa & MPI_ADDRESS_REPLY_A_BIT)
604 mpt_reply(ioc, pa);
605 else
606 mpt_turbo_reply(ioc, pa);
607 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
608 } while (pa != 0xFFFFFFFF);
609
610 return IRQ_HANDLED;
611}
612
613/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
614/**
615 * mptbase_reply - MPT base driver's callback routine
616 * @ioc: Pointer to MPT_ADAPTER structure
617 * @req: Pointer to original MPT request frame
618 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
619 *
620 * MPT base driver's callback routine; all base driver
621 * "internal" request/reply processing is routed here.
622 * Currently used for EventNotification and EventAck handling.
623 *
624 * Returns 1 indicating original alloc'd request frame ptr
625 * should be freed, or 0 if it shouldn't.
626 */
627static int
628mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
629{
630 EventNotificationReply_t *pEventReply;
631 u8 event;
632 int evHandlers;
633 int freereq = 1;
634
635 switch (reply->u.hdr.Function) {
636 case MPI_FUNCTION_EVENT_NOTIFICATION:
637 pEventReply = (EventNotificationReply_t *)reply;
638 evHandlers = 0;
639 ProcessEventNotification(ioc, pEventReply, &evHandlers);
640 event = le32_to_cpu(pEventReply->Event) & 0xFF;
641 if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
642 freereq = 0;
643 if (event != MPI_EVENT_EVENT_CHANGE)
644 break;
645 /* else: fall through */
646 case MPI_FUNCTION_CONFIG:
647 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
648 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
649 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
650 memcpy(ioc->mptbase_cmds.reply, reply,
651 min(MPT_DEFAULT_FRAME_SIZE,
652 4 * reply->u.reply.MsgLength));
653 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
654 ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
655 complete(&ioc->mptbase_cmds.done);
656 } else
657 freereq = 0;
658 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
659 freereq = 1;
660 break;
661 case MPI_FUNCTION_EVENT_ACK:
662 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
663 "EventAck reply received\n", ioc->name));
664 break;
665 default:
666 printk(MYIOC_s_ERR_FMT
667 "Unexpected msg function (=%02Xh) reply received!\n",
668 ioc->name, reply->u.hdr.Function);
669 break;
670 }
671
672 /*
673 * Conditionally tell caller to free the original
674 * EventNotification/EventAck/unexpected request frame!
675 */
676 return freereq;
677}
678
679/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
680/**
681 * mpt_register - Register protocol-specific main callback handler.
682 * @cbfunc: callback function pointer
683 * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
684 * @func_name: call function's name
685 *
686 * This routine is called by a protocol-specific driver (SCSI host,
687 * LAN, SCSI target) to register its reply callback routine. Each
688 * protocol-specific driver must do this before it will be able to
689 * use any IOC resources, such as obtaining request frames.
690 *
691 * NOTES: The SCSI protocol driver currently calls this routine thrice
692 * in order to register separate callbacks; one for "normal" SCSI IO;
693 * one for MptScsiTaskMgmt requests; one for Scan/DV requests.
694 *
695 * Returns u8 valued "handle" in the range (and S.O.D. order)
696 * {N,...,7,6,5,...,1} if successful.
697 * A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
698 * considered an error by the caller.
699 */
700u8
701mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
702{
703 u8 cb_idx;
704 last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
705
706 /*
707 * Search for empty callback slot in this order: {N,...,7,6,5,...,1}
708 * (slot/handle 0 is reserved!)
709 */
710 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
711 if (MptCallbacks[cb_idx] == NULL) {
712 MptCallbacks[cb_idx] = cbfunc;
713 MptDriverClass[cb_idx] = dclass;
714 MptEvHandlers[cb_idx] = NULL;
715 last_drv_idx = cb_idx;
716 strlcpy(MptCallbacksName[cb_idx], func_name,
717 MPT_MAX_CALLBACKNAME_LEN+1);
718 break;
719 }
720 }
721
722 return last_drv_idx;
723}
724
725/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
726/**
727 * mpt_deregister - Deregister a protocol drivers resources.
728 * @cb_idx: previously registered callback handle
729 *
730 * Each protocol-specific driver should call this routine when its
731 * module is unloaded.
732 */
733void
734mpt_deregister(u8 cb_idx)
735{
736 if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
737 MptCallbacks[cb_idx] = NULL;
738 MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
739 MptEvHandlers[cb_idx] = NULL;
740
741 last_drv_idx++;
742 }
743}
744
745/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
746/**
747 * mpt_event_register - Register protocol-specific event callback handler.
748 * @cb_idx: previously registered (via mpt_register) callback handle
749 * @ev_cbfunc: callback function
750 *
751 * This routine can be called by one or more protocol-specific drivers
752 * if/when they choose to be notified of MPT events.
753 *
754 * Returns 0 for success.
755 */
756int
757mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
758{
759 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
760 return -1;
761
762 MptEvHandlers[cb_idx] = ev_cbfunc;
763 return 0;
764}
765
766/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
767/**
768 * mpt_event_deregister - Deregister protocol-specific event callback handler
769 * @cb_idx: previously registered callback handle
770 *
771 * Each protocol-specific driver should call this routine
772 * when it does not (or can no longer) handle events,
773 * or when its module is unloaded.
774 */
775void
776mpt_event_deregister(u8 cb_idx)
777{
778 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
779 return;
780
781 MptEvHandlers[cb_idx] = NULL;
782}
783
784/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
785/**
786 * mpt_reset_register - Register protocol-specific IOC reset handler.
787 * @cb_idx: previously registered (via mpt_register) callback handle
788 * @reset_func: reset function
789 *
790 * This routine can be called by one or more protocol-specific drivers
791 * if/when they choose to be notified of IOC resets.
792 *
793 * Returns 0 for success.
794 */
795int
796mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
797{
798 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
799 return -1;
800
801 MptResetHandlers[cb_idx] = reset_func;
802 return 0;
803}
804
805/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
806/**
807 * mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
808 * @cb_idx: previously registered callback handle
809 *
810 * Each protocol-specific driver should call this routine
811 * when it does not (or can no longer) handle IOC reset handling,
812 * or when its module is unloaded.
813 */
814void
815mpt_reset_deregister(u8 cb_idx)
816{
817 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
818 return;
819
820 MptResetHandlers[cb_idx] = NULL;
821}
822
823/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
824/**
825 * mpt_device_driver_register - Register device driver hooks
826 * @dd_cbfunc: driver callbacks struct
827 * @cb_idx: MPT protocol driver index
828 */
829int
830mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
831{
832 MPT_ADAPTER *ioc;
833 const struct pci_device_id *id;
834
835 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
836 return -EINVAL;
837
838 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
839
840 /* call per pci device probe entry point */
841 list_for_each_entry(ioc, &ioc_list, list) {
842 id = ioc->pcidev->driver ?
843 ioc->pcidev->driver->id_table : NULL;
844 if (dd_cbfunc->probe)
845 dd_cbfunc->probe(ioc->pcidev, id);
846 }
847
848 return 0;
849}
850
851/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
852/**
853 * mpt_device_driver_deregister - DeRegister device driver hooks
854 * @cb_idx: MPT protocol driver index
855 */
856void
857mpt_device_driver_deregister(u8 cb_idx)
858{
859 struct mpt_pci_driver *dd_cbfunc;
860 MPT_ADAPTER *ioc;
861
862 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
863 return;
864
865 dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
866
867 list_for_each_entry(ioc, &ioc_list, list) {
868 if (dd_cbfunc->remove)
869 dd_cbfunc->remove(ioc->pcidev);
870 }
871
872 MptDeviceDriverHandlers[cb_idx] = NULL;
873}
874
875
876/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
877/**
878 * mpt_get_msg_frame - Obtain an MPT request frame from the pool
879 * @cb_idx: Handle of registered MPT protocol driver
880 * @ioc: Pointer to MPT adapter structure
881 *
882 * Obtain an MPT request frame from the pool (of 1024) that are
883 * allocated per MPT adapter.
884 *
885 * Returns pointer to a MPT request frame or %NULL if none are available
886 * or IOC is not active.
887 */
888MPT_FRAME_HDR*
889mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
890{
891 MPT_FRAME_HDR *mf;
892 unsigned long flags;
893 u16 req_idx; /* Request index */
894
895 /* validate handle and ioc identifier */
896
897#ifdef MFCNT
898 if (!ioc->active)
899 printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
900 "returning NULL!\n", ioc->name);
901#endif
902
903 /* If interrupts are not attached, do not return a request frame */
904 if (!ioc->active)
905 return NULL;
906
907 spin_lock_irqsave(&ioc->FreeQlock, flags);
908 if (!list_empty(&ioc->FreeQ)) {
909 int req_offset;
910
911 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
912 u.frame.linkage.list);
913 list_del(&mf->u.frame.linkage.list);
914 mf->u.frame.linkage.arg1 = 0;
915 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
916 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
917 /* u16! */
918 req_idx = req_offset / ioc->req_sz;
919 mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
920 mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
921 /* Default, will be changed if necessary in SG generation */
922 ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
923#ifdef MFCNT
924 ioc->mfcnt++;
925#endif
926 }
927 else
928 mf = NULL;
929 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
930
931#ifdef MFCNT
932 if (mf == NULL)
933 printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
934 "Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
935 ioc->req_depth);
936 mfcounter++;
937 if (mfcounter == PRINT_MF_COUNT)
938 printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
939 ioc->mfcnt, ioc->req_depth);
940#endif
941
942 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
943 ioc->name, cb_idx, ioc->id, mf));
944 return mf;
945}
946
947/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
948/**
949 * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
950 * @cb_idx: Handle of registered MPT protocol driver
951 * @ioc: Pointer to MPT adapter structure
952 * @mf: Pointer to MPT request frame
953 *
954 * This routine posts an MPT request frame to the request post FIFO of a
955 * specific MPT adapter.
956 */
957void
958mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
959{
960 u32 mf_dma_addr;
961 int req_offset;
962 u16 req_idx; /* Request index */
963
964 /* ensure values are reset properly! */
965 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
966 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
967 /* u16! */
968 req_idx = req_offset / ioc->req_sz;
969 mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
970 mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
971
972 DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
973
974 mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
975 dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
976 "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
977 ioc->RequestNB[req_idx]));
978 CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
979}
980
981/**
982 * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
983 * @cb_idx: Handle of registered MPT protocol driver
984 * @ioc: Pointer to MPT adapter structure
985 * @mf: Pointer to MPT request frame
986 *
987 * Send a protocol-specific MPT request frame to an IOC using
988 * hi-priority request queue.
989 *
990 * This routine posts an MPT request frame to the request post FIFO of a
991 * specific MPT adapter.
992 **/
993void
994mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
995{
996 u32 mf_dma_addr;
997 int req_offset;
998 u16 req_idx; /* Request index */
999
1000 /* ensure values are reset properly! */
1001 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
1002 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
1003 req_idx = req_offset / ioc->req_sz;
1004 mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
1005 mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
1006
1007 DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
1008
1009 mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
1010 dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
1011 ioc->name, mf_dma_addr, req_idx));
1012 CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
1013}
1014
1015/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1016/**
1017 * mpt_free_msg_frame - Place MPT request frame back on FreeQ.
1018 * @ioc: Pointer to MPT adapter structure
1019 * @mf: Pointer to MPT request frame
1020 *
1021 * This routine places a MPT request frame back on the MPT adapter's
1022 * FreeQ.
1023 */
1024void
1025mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1026{
1027 unsigned long flags;
1028
1029 /* Put Request back on FreeQ! */
1030 spin_lock_irqsave(&ioc->FreeQlock, flags);
1031 if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
1032 goto out;
1033 /* signature to know if this mf is freed */
1034 mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
1035 list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
1036#ifdef MFCNT
1037 ioc->mfcnt--;
1038#endif
1039 out:
1040 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1041}
1042
1043/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1044/**
1045 * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
1046 * @pAddr: virtual address for SGE
1047 * @flagslength: SGE flags and data transfer length
1048 * @dma_addr: Physical address
1049 *
1050 * This routine places a MPT request frame back on the MPT adapter's
1051 * FreeQ.
1052 */
1053static void
1054mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1055{
1056 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
1057 pSge->FlagsLength = cpu_to_le32(flagslength);
1058 pSge->Address = cpu_to_le32(dma_addr);
1059}
1060
1061/**
1062 * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
1063 * @pAddr: virtual address for SGE
1064 * @flagslength: SGE flags and data transfer length
1065 * @dma_addr: Physical address
1066 *
1067 * This routine places a MPT request frame back on the MPT adapter's
1068 * FreeQ.
1069 **/
1070static void
1071mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1072{
1073 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1074 pSge->Address.Low = cpu_to_le32
1075 (lower_32_bits(dma_addr));
1076 pSge->Address.High = cpu_to_le32
1077 (upper_32_bits(dma_addr));
1078 pSge->FlagsLength = cpu_to_le32
1079 ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1080}
1081
1082/**
1083 * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
1084 * @pAddr: virtual address for SGE
1085 * @flagslength: SGE flags and data transfer length
1086 * @dma_addr: Physical address
1087 *
1088 * This routine places a MPT request frame back on the MPT adapter's
1089 * FreeQ.
1090 **/
1091static void
1092mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1093{
1094 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1095 u32 tmp;
1096
1097 pSge->Address.Low = cpu_to_le32
1098 (lower_32_bits(dma_addr));
1099 tmp = (u32)(upper_32_bits(dma_addr));
1100
1101 /*
1102 * 1078 errata workaround for the 36GB limitation
1103 */
1104 if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
1105 flagslength |=
1106 MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
1107 tmp |= (1<<31);
1108 if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
1109 printk(KERN_DEBUG "1078 P0M2 addressing for "
1110 "addr = 0x%llx len = %d\n",
1111 (unsigned long long)dma_addr,
1112 MPI_SGE_LENGTH(flagslength));
1113 }
1114
1115 pSge->Address.High = cpu_to_le32(tmp);
1116 pSge->FlagsLength = cpu_to_le32(
1117 (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1118}
1119
1120/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1121/**
1122 * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
1123 * @pAddr: virtual address for SGE
1124 * @next: nextChainOffset value (u32's)
1125 * @length: length of next SGL segment
1126 * @dma_addr: Physical address
1127 *
1128 */
1129static void
1130mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1131{
1132 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
1133
1134 pChain->Length = cpu_to_le16(length);
1135 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1136 pChain->NextChainOffset = next;
1137 pChain->Address = cpu_to_le32(dma_addr);
1138}
1139
1140/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1141/**
1142 * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
1143 * @pAddr: virtual address for SGE
1144 * @next: nextChainOffset value (u32's)
1145 * @length: length of next SGL segment
1146 * @dma_addr: Physical address
1147 *
1148 */
1149static void
1150mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1151{
1152 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
1153 u32 tmp = dma_addr & 0xFFFFFFFF;
1154
1155 pChain->Length = cpu_to_le16(length);
1156 pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
1157 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
1158
1159 pChain->NextChainOffset = next;
1160
1161 pChain->Address.Low = cpu_to_le32(tmp);
1162 tmp = (u32)(upper_32_bits(dma_addr));
1163 pChain->Address.High = cpu_to_le32(tmp);
1164}
1165
1166/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1167/**
1168 * mpt_send_handshake_request - Send MPT request via doorbell handshake method.
1169 * @cb_idx: Handle of registered MPT protocol driver
1170 * @ioc: Pointer to MPT adapter structure
1171 * @reqBytes: Size of the request in bytes
1172 * @req: Pointer to MPT request frame
1173 * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
1174 *
1175 * This routine is used exclusively to send MptScsiTaskMgmt
1176 * requests since they are required to be sent via doorbell handshake.
1177 *
1178 * NOTE: It is the callers responsibility to byte-swap fields in the
1179 * request which are greater than 1 byte in size.
1180 *
1181 * Returns 0 for success, non-zero for failure.
1182 */
1183int
1184mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
1185{
1186 int r = 0;
1187 u8 *req_as_bytes;
1188 int ii;
1189
1190 /* State is known to be good upon entering
1191 * this function so issue the bus reset
1192 * request.
1193 */
1194
1195 /*
1196 * Emulate what mpt_put_msg_frame() does /wrt to sanity
1197 * setting cb_idx/req_idx. But ONLY if this request
1198 * is in proper (pre-alloc'd) request buffer range...
1199 */
1200 ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
1201 if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
1202 MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
1203 mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
1204 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
1205 }
1206
1207 /* Make sure there are no doorbells */
1208 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1209
1210 CHIPREG_WRITE32(&ioc->chip->Doorbell,
1211 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
1212 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
1213
1214 /* Wait for IOC doorbell int */
1215 if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
1216 return ii;
1217 }
1218
1219 /* Read doorbell and check for active bit */
1220 if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
1221 return -5;
1222
1223 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
1224 ioc->name, ii));
1225
1226 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1227
1228 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1229 return -2;
1230 }
1231
1232 /* Send request via doorbell handshake */
1233 req_as_bytes = (u8 *) req;
1234 for (ii = 0; ii < reqBytes/4; ii++) {
1235 u32 word;
1236
1237 word = ((req_as_bytes[(ii*4) + 0] << 0) |
1238 (req_as_bytes[(ii*4) + 1] << 8) |
1239 (req_as_bytes[(ii*4) + 2] << 16) |
1240 (req_as_bytes[(ii*4) + 3] << 24));
1241 CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
1242 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1243 r = -3;
1244 break;
1245 }
1246 }
1247
1248 if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
1249 r = 0;
1250 else
1251 r = -4;
1252
1253 /* Make sure there are no doorbells */
1254 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1255
1256 return r;
1257}
1258
1259/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1260/**
1261 * mpt_host_page_access_control - control the IOC's Host Page Buffer access
1262 * @ioc: Pointer to MPT adapter structure
1263 * @access_control_value: define bits below
1264 * @sleepFlag: Specifies whether the process can sleep
1265 *
1266 * Provides mechanism for the host driver to control the IOC's
1267 * Host Page Buffer access.
1268 *
1269 * Access Control Value - bits[15:12]
1270 * 0h Reserved
1271 * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
1272 * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
1273 * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
1274 *
1275 * Returns 0 for success, non-zero for failure.
1276 */
1277
1278static int
1279mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
1280{
1281 int r = 0;
1282
1283 /* return if in use */
1284 if (CHIPREG_READ32(&ioc->chip->Doorbell)
1285 & MPI_DOORBELL_ACTIVE)
1286 return -1;
1287
1288 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1289
1290 CHIPREG_WRITE32(&ioc->chip->Doorbell,
1291 ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
1292 <<MPI_DOORBELL_FUNCTION_SHIFT) |
1293 (access_control_value<<12)));
1294
1295 /* Wait for IOC to clear Doorbell Status bit */
1296 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1297 return -2;
1298 }else
1299 return 0;
1300}
1301
1302/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1303/**
1304 * mpt_host_page_alloc - allocate system memory for the fw
1305 * @ioc: Pointer to pointer to IOC adapter
1306 * @ioc_init: Pointer to ioc init config page
1307 *
1308 * If we already allocated memory in past, then resend the same pointer.
1309 * Returns 0 for success, non-zero for failure.
1310 */
1311static int
1312mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1313{
1314 char *psge;
1315 int flags_length;
1316 u32 host_page_buffer_sz=0;
1317
1318 if(!ioc->HostPageBuffer) {
1319
1320 host_page_buffer_sz =
1321 le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
1322
1323 if(!host_page_buffer_sz)
1324 return 0; /* fw doesn't need any host buffers */
1325
1326 /* spin till we get enough memory */
1327 while(host_page_buffer_sz > 0) {
1328
1329 if((ioc->HostPageBuffer = pci_alloc_consistent(
1330 ioc->pcidev,
1331 host_page_buffer_sz,
1332 &ioc->HostPageBuffer_dma)) != NULL) {
1333
1334 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1335 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1336 ioc->name, ioc->HostPageBuffer,
1337 (u32)ioc->HostPageBuffer_dma,
1338 host_page_buffer_sz));
1339 ioc->alloc_total += host_page_buffer_sz;
1340 ioc->HostPageBuffer_sz = host_page_buffer_sz;
1341 break;
1342 }
1343
1344 host_page_buffer_sz -= (4*1024);
1345 }
1346 }
1347
1348 if(!ioc->HostPageBuffer) {
1349 printk(MYIOC_s_ERR_FMT
1350 "Failed to alloc memory for host_page_buffer!\n",
1351 ioc->name);
1352 return -999;
1353 }
1354
1355 psge = (char *)&ioc_init->HostPageBufferSGE;
1356 flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1357 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
1358 MPI_SGE_FLAGS_HOST_TO_IOC |
1359 MPI_SGE_FLAGS_END_OF_BUFFER;
1360 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1361 flags_length |= ioc->HostPageBuffer_sz;
1362 ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1363 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1364
1365 return 0;
1366}
1367
1368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1369/**
1370 * mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
1371 * @iocid: IOC unique identifier (integer)
1372 * @iocpp: Pointer to pointer to IOC adapter
1373 *
1374 * Given a unique IOC identifier, set pointer to the associated MPT
1375 * adapter structure.
1376 *
1377 * Returns iocid and sets iocpp if iocid is found.
1378 * Returns -1 if iocid is not found.
1379 */
1380int
1381mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
1382{
1383 MPT_ADAPTER *ioc;
1384
1385 list_for_each_entry(ioc,&ioc_list,list) {
1386 if (ioc->id == iocid) {
1387 *iocpp =ioc;
1388 return iocid;
1389 }
1390 }
1391
1392 *iocpp = NULL;
1393 return -1;
1394}
1395
1396/**
1397 * mpt_get_product_name - returns product string
1398 * @vendor: pci vendor id
1399 * @device: pci device id
1400 * @revision: pci revision id
1401 *
1402 * Returns product string displayed when driver loads,
1403 * in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
1404 *
1405 **/
1406static const char*
1407mpt_get_product_name(u16 vendor, u16 device, u8 revision)
1408{
1409 char *product_str = NULL;
1410
1411 if (vendor == PCI_VENDOR_ID_BROCADE) {
1412 switch (device)
1413 {
1414 case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1415 switch (revision)
1416 {
1417 case 0x00:
1418 product_str = "BRE040 A0";
1419 break;
1420 case 0x01:
1421 product_str = "BRE040 A1";
1422 break;
1423 default:
1424 product_str = "BRE040";
1425 break;
1426 }
1427 break;
1428 }
1429 goto out;
1430 }
1431
1432 switch (device)
1433 {
1434 case MPI_MANUFACTPAGE_DEVICEID_FC909:
1435 product_str = "LSIFC909 B1";
1436 break;
1437 case MPI_MANUFACTPAGE_DEVICEID_FC919:
1438 product_str = "LSIFC919 B0";
1439 break;
1440 case MPI_MANUFACTPAGE_DEVICEID_FC929:
1441 product_str = "LSIFC929 B0";
1442 break;
1443 case MPI_MANUFACTPAGE_DEVICEID_FC919X:
1444 if (revision < 0x80)
1445 product_str = "LSIFC919X A0";
1446 else
1447 product_str = "LSIFC919XL A1";
1448 break;
1449 case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1450 if (revision < 0x80)
1451 product_str = "LSIFC929X A0";
1452 else
1453 product_str = "LSIFC929XL A1";
1454 break;
1455 case MPI_MANUFACTPAGE_DEVICEID_FC939X:
1456 product_str = "LSIFC939X A1";
1457 break;
1458 case MPI_MANUFACTPAGE_DEVICEID_FC949X:
1459 product_str = "LSIFC949X A1";
1460 break;
1461 case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1462 switch (revision)
1463 {
1464 case 0x00:
1465 product_str = "LSIFC949E A0";
1466 break;
1467 case 0x01:
1468 product_str = "LSIFC949E A1";
1469 break;
1470 default:
1471 product_str = "LSIFC949E";
1472 break;
1473 }
1474 break;
1475 case MPI_MANUFACTPAGE_DEVID_53C1030:
1476 switch (revision)
1477 {
1478 case 0x00:
1479 product_str = "LSI53C1030 A0";
1480 break;
1481 case 0x01:
1482 product_str = "LSI53C1030 B0";
1483 break;
1484 case 0x03:
1485 product_str = "LSI53C1030 B1";
1486 break;
1487 case 0x07:
1488 product_str = "LSI53C1030 B2";
1489 break;
1490 case 0x08:
1491 product_str = "LSI53C1030 C0";
1492 break;
1493 case 0x80:
1494 product_str = "LSI53C1030T A0";
1495 break;
1496 case 0x83:
1497 product_str = "LSI53C1030T A2";
1498 break;
1499 case 0x87:
1500 product_str = "LSI53C1030T A3";
1501 break;
1502 case 0xc1:
1503 product_str = "LSI53C1020A A1";
1504 break;
1505 default:
1506 product_str = "LSI53C1030";
1507 break;
1508 }
1509 break;
1510 case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
1511 switch (revision)
1512 {
1513 case 0x03:
1514 product_str = "LSI53C1035 A2";
1515 break;
1516 case 0x04:
1517 product_str = "LSI53C1035 B0";
1518 break;
1519 default:
1520 product_str = "LSI53C1035";
1521 break;
1522 }
1523 break;
1524 case MPI_MANUFACTPAGE_DEVID_SAS1064:
1525 switch (revision)
1526 {
1527 case 0x00:
1528 product_str = "LSISAS1064 A1";
1529 break;
1530 case 0x01:
1531 product_str = "LSISAS1064 A2";
1532 break;
1533 case 0x02:
1534 product_str = "LSISAS1064 A3";
1535 break;
1536 case 0x03:
1537 product_str = "LSISAS1064 A4";
1538 break;
1539 default:
1540 product_str = "LSISAS1064";
1541 break;
1542 }
1543 break;
1544 case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1545 switch (revision)
1546 {
1547 case 0x00:
1548 product_str = "LSISAS1064E A0";
1549 break;
1550 case 0x01:
1551 product_str = "LSISAS1064E B0";
1552 break;
1553 case 0x02:
1554 product_str = "LSISAS1064E B1";
1555 break;
1556 case 0x04:
1557 product_str = "LSISAS1064E B2";
1558 break;
1559 case 0x08:
1560 product_str = "LSISAS1064E B3";
1561 break;
1562 default:
1563 product_str = "LSISAS1064E";
1564 break;
1565 }
1566 break;
1567 case MPI_MANUFACTPAGE_DEVID_SAS1068:
1568 switch (revision)
1569 {
1570 case 0x00:
1571 product_str = "LSISAS1068 A0";
1572 break;
1573 case 0x01:
1574 product_str = "LSISAS1068 B0";
1575 break;
1576 case 0x02:
1577 product_str = "LSISAS1068 B1";
1578 break;
1579 default:
1580 product_str = "LSISAS1068";
1581 break;
1582 }
1583 break;
1584 case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1585 switch (revision)
1586 {
1587 case 0x00:
1588 product_str = "LSISAS1068E A0";
1589 break;
1590 case 0x01:
1591 product_str = "LSISAS1068E B0";
1592 break;
1593 case 0x02:
1594 product_str = "LSISAS1068E B1";
1595 break;
1596 case 0x04:
1597 product_str = "LSISAS1068E B2";
1598 break;
1599 case 0x08:
1600 product_str = "LSISAS1068E B3";
1601 break;
1602 default:
1603 product_str = "LSISAS1068E";
1604 break;
1605 }
1606 break;
1607 case MPI_MANUFACTPAGE_DEVID_SAS1078:
1608 switch (revision)
1609 {
1610 case 0x00:
1611 product_str = "LSISAS1078 A0";
1612 break;
1613 case 0x01:
1614 product_str = "LSISAS1078 B0";
1615 break;
1616 case 0x02:
1617 product_str = "LSISAS1078 C0";
1618 break;
1619 case 0x03:
1620 product_str = "LSISAS1078 C1";
1621 break;
1622 case 0x04:
1623 product_str = "LSISAS1078 C2";
1624 break;
1625 default:
1626 product_str = "LSISAS1078";
1627 break;
1628 }
1629 break;
1630 }
1631
1632 out:
1633 return product_str;
1634}
1635
1636/**
1637 * mpt_mapresources - map in memory mapped io
1638 * @ioc: Pointer to pointer to IOC adapter
1639 *
1640 **/
1641static int
1642mpt_mapresources(MPT_ADAPTER *ioc)
1643{
1644 u8 __iomem *mem;
1645 int ii;
1646 resource_size_t mem_phys;
1647 unsigned long port;
1648 u32 msize;
1649 u32 psize;
1650 int r = -ENODEV;
1651 struct pci_dev *pdev;
1652
1653 pdev = ioc->pcidev;
1654 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1655 if (pci_enable_device_mem(pdev)) {
1656 printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
1657 "failed\n", ioc->name);
1658 return r;
1659 }
1660 if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
1661 printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
1662 "MEM failed\n", ioc->name);
1663 goto out_pci_disable_device;
1664 }
1665
1666 if (sizeof(dma_addr_t) > 4) {
1667 const uint64_t required_mask = dma_get_required_mask
1668 (&pdev->dev);
1669 if (required_mask > DMA_BIT_MASK(32)
1670 && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1671 && !pci_set_consistent_dma_mask(pdev,
1672 DMA_BIT_MASK(64))) {
1673 ioc->dma_mask = DMA_BIT_MASK(64);
1674 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1675 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1676 ioc->name));
1677 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1678 && !pci_set_consistent_dma_mask(pdev,
1679 DMA_BIT_MASK(32))) {
1680 ioc->dma_mask = DMA_BIT_MASK(32);
1681 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1682 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1683 ioc->name));
1684 } else {
1685 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1686 ioc->name, pci_name(pdev));
1687 goto out_pci_release_region;
1688 }
1689 } else {
1690 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1691 && !pci_set_consistent_dma_mask(pdev,
1692 DMA_BIT_MASK(32))) {
1693 ioc->dma_mask = DMA_BIT_MASK(32);
1694 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1695 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1696 ioc->name));
1697 } else {
1698 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1699 ioc->name, pci_name(pdev));
1700 goto out_pci_release_region;
1701 }
1702 }
1703
1704 mem_phys = msize = 0;
1705 port = psize = 0;
1706 for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
1707 if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
1708 if (psize)
1709 continue;
1710 /* Get I/O space! */
1711 port = pci_resource_start(pdev, ii);
1712 psize = pci_resource_len(pdev, ii);
1713 } else {
1714 if (msize)
1715 continue;
1716 /* Get memmap */
1717 mem_phys = pci_resource_start(pdev, ii);
1718 msize = pci_resource_len(pdev, ii);
1719 }
1720 }
1721 ioc->mem_size = msize;
1722
1723 mem = NULL;
1724 /* Get logical ptr for PciMem0 space */
1725 /*mem = ioremap(mem_phys, msize);*/
1726 mem = ioremap(mem_phys, msize);
1727 if (mem == NULL) {
1728 printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
1729 " memory!\n", ioc->name);
1730 r = -EINVAL;
1731 goto out_pci_release_region;
1732 }
1733 ioc->memmap = mem;
1734 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
1735 ioc->name, mem, (unsigned long long)mem_phys));
1736
1737 ioc->mem_phys = mem_phys;
1738 ioc->chip = (SYSIF_REGS __iomem *)mem;
1739
1740 /* Save Port IO values in case we need to do downloadboot */
1741 ioc->pio_mem_phys = port;
1742 ioc->pio_chip = (SYSIF_REGS __iomem *)port;
1743
1744 return 0;
1745
1746out_pci_release_region:
1747 pci_release_selected_regions(pdev, ioc->bars);
1748out_pci_disable_device:
1749 pci_disable_device(pdev);
1750 return r;
1751}
1752
1753/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1754/**
1755 * mpt_attach - Install a PCI intelligent MPT adapter.
1756 * @pdev: Pointer to pci_dev structure
1757 * @id: PCI device ID information
1758 *
1759 * This routine performs all the steps necessary to bring the IOC of
1760 * a MPT adapter to a OPERATIONAL state. This includes registering
1761 * memory regions, registering the interrupt, and allocating request
1762 * and reply memory pools.
1763 *
1764 * This routine also pre-fetches the LAN MAC address of a Fibre Channel
1765 * MPT adapter.
1766 *
1767 * Returns 0 for success, non-zero for failure.
1768 *
1769 * TODO: Add support for polled controllers
1770 */
1771int
1772mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1773{
1774 MPT_ADAPTER *ioc;
1775 u8 cb_idx;
1776 int r = -ENODEV;
1777 u8 pcixcmd;
1778 static int mpt_ids = 0;
1779#ifdef CONFIG_PROC_FS
1780 struct proc_dir_entry *dent;
1781#endif
1782
1783 ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
1784 if (ioc == NULL) {
1785 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
1786 return -ENOMEM;
1787 }
1788
1789 ioc->id = mpt_ids++;
1790 sprintf(ioc->name, "ioc%d", ioc->id);
1791 dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1792
1793 /*
1794 * set initial debug level
1795 * (refer to mptdebug.h)
1796 *
1797 */
1798 ioc->debug_level = mpt_debug_level;
1799 if (mpt_debug_level)
1800 printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);
1801
1802 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
1803
1804 ioc->pcidev = pdev;
1805 if (mpt_mapresources(ioc)) {
1806 goto out_free_ioc;
1807 }
1808
1809 /*
1810 * Setting up proper handlers for scatter gather handling
1811 */
1812 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
1813 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
1814 ioc->add_sge = &mpt_add_sge_64bit_1078;
1815 else
1816 ioc->add_sge = &mpt_add_sge_64bit;
1817 ioc->add_chain = &mpt_add_chain_64bit;
1818 ioc->sg_addr_size = 8;
1819 } else {
1820 ioc->add_sge = &mpt_add_sge;
1821 ioc->add_chain = &mpt_add_chain;
1822 ioc->sg_addr_size = 4;
1823 }
1824 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
1825
1826 ioc->alloc_total = sizeof(MPT_ADAPTER);
1827 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1828 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1829
1830
1831 spin_lock_init(&ioc->taskmgmt_lock);
1832 mutex_init(&ioc->internal_cmds.mutex);
1833 init_completion(&ioc->internal_cmds.done);
1834 mutex_init(&ioc->mptbase_cmds.mutex);
1835 init_completion(&ioc->mptbase_cmds.done);
1836 mutex_init(&ioc->taskmgmt_cmds.mutex);
1837 init_completion(&ioc->taskmgmt_cmds.done);
1838
1839 /* Initialize the event logging.
1840 */
1841 ioc->eventTypes = 0; /* None */
1842 ioc->eventContext = 0;
1843 ioc->eventLogSize = 0;
1844 ioc->events = NULL;
1845
1846#ifdef MFCNT
1847 ioc->mfcnt = 0;
1848#endif
1849
1850 ioc->sh = NULL;
1851 ioc->cached_fw = NULL;
1852
1853 /* Initialize SCSI Config Data structure
1854 */
1855 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1856
1857 /* Initialize the fc rport list head.
1858 */
1859 INIT_LIST_HEAD(&ioc->fc_rports);
1860
1861 /* Find lookup slot. */
1862 INIT_LIST_HEAD(&ioc->list);
1863
1864
1865 /* Initialize workqueue */
1866 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1867
1868 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1869 "mpt_poll_%d", ioc->id);
1870 ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
1871 WQ_MEM_RECLAIM, 0);
1872 if (!ioc->reset_work_q) {
1873 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
1874 ioc->name);
1875 r = -ENOMEM;
1876 goto out_unmap_resources;
1877 }
1878
1879 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
1880 ioc->name, &ioc->facts, &ioc->pfacts[0]));
1881
1882 ioc->prod_name = mpt_get_product_name(pdev->vendor, pdev->device,
1883 pdev->revision);
1884
	switch (pdev->device) {
1887 case MPI_MANUFACTPAGE_DEVICEID_FC939X:
1888 case MPI_MANUFACTPAGE_DEVICEID_FC949X:
1889 ioc->errata_flag_1064 = 1;
1890 /* fall through */
1891 case MPI_MANUFACTPAGE_DEVICEID_FC909:
1892 case MPI_MANUFACTPAGE_DEVICEID_FC929:
1893 case MPI_MANUFACTPAGE_DEVICEID_FC919:
1894 case MPI_MANUFACTPAGE_DEVICEID_FC949E:
1895 ioc->bus_type = FC;
1896 break;
1897
1898 case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1899 if (pdev->revision < XL_929) {
1900 /* 929X Chip Fix. Set Split transactions level
1901 * for PCIX. Set MOST bits to zero.
1902 */
1903 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1904 pcixcmd &= 0x8F;
1905 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1906 } else {
1907 /* 929XL Chip Fix. Set MMRBC to 0x08.
1908 */
1909 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1910 pcixcmd |= 0x08;
1911 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1912 }
1913 ioc->bus_type = FC;
1914 break;
1915
1916 case MPI_MANUFACTPAGE_DEVICEID_FC919X:
1917 /* 919X Chip Fix. Set Split transactions level
1918 * for PCIX. Set MOST bits to zero.
1919 */
1920 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1921 pcixcmd &= 0x8F;
1922 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1923 ioc->bus_type = FC;
1924 break;
1925
1926 case MPI_MANUFACTPAGE_DEVID_53C1030:
1927 /* 1030 Chip Fix. Disable Split transactions
1928 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
1929 */
1930 if (pdev->revision < C0_1030) {
1931 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1932 pcixcmd &= 0x8F;
1933 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1934 }
1935 /* fall through */
1936
1937 case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
1938 ioc->bus_type = SPI;
1939 break;
1940
1941 case MPI_MANUFACTPAGE_DEVID_SAS1064:
1942 case MPI_MANUFACTPAGE_DEVID_SAS1068:
1943 ioc->errata_flag_1064 = 1;
1944 ioc->bus_type = SAS;
1945 break;
1946
1947 case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1948 case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1949 case MPI_MANUFACTPAGE_DEVID_SAS1078:
1950 ioc->bus_type = SAS;
1951 break;
1952 }
1953
1954
1955 switch (ioc->bus_type) {
1956
1957 case SAS:
1958 ioc->msi_enable = mpt_msi_enable_sas;
1959 break;
1960
1961 case SPI:
1962 ioc->msi_enable = mpt_msi_enable_spi;
1963 break;
1964
1965 case FC:
1966 ioc->msi_enable = mpt_msi_enable_fc;
1967 break;
1968
1969 default:
1970 ioc->msi_enable = 0;
1971 break;
1972 }
1973
1974 ioc->fw_events_off = 1;
1975
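	/*
	 * Parts flagged with errata_flag_1064 are run with PCI I/O space
	 * access disabled; mpt_downloadboot() temporarily re-enables it
	 * around its programmed-I/O writes.
	 */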
1976 if (ioc->errata_flag_1064)
1977 pci_disable_io_access(pdev);
1978
1979 spin_lock_init(&ioc->FreeQlock);
1980
1981 /* Disable all! */
1982 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1983 ioc->active = 0;
1984 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1985
1986 /* Set IOC ptr in the pcidev's driver data. */
1987 pci_set_drvdata(ioc->pcidev, ioc);
1988
1989 /* Set lookup ptr. */
1990 list_add_tail(&ioc->list, &ioc_list);
1991
1992 /* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
1993 */
1994 mpt_detect_bound_ports(ioc, pdev);
1995
1996 INIT_LIST_HEAD(&ioc->fw_event_list);
1997 spin_lock_init(&ioc->fw_event_lock);
1998 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
1999 ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
2000 WQ_MEM_RECLAIM, 0);
2001 if (!ioc->fw_event_q) {
2002 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
2003 ioc->name);
2004 r = -ENOMEM;
2005 goto out_remove_ioc;
2006 }
2007
2008 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
2009 CAN_SLEEP)) != 0){
2010 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
2011 ioc->name, r);
2012
2013 destroy_workqueue(ioc->fw_event_q);
2014 ioc->fw_event_q = NULL;
2015
2016 list_del(&ioc->list);
2017 if (ioc->alt_ioc)
2018 ioc->alt_ioc->alt_ioc = NULL;
2019 iounmap(ioc->memmap);
2020 if (pci_is_enabled(pdev))
2021 pci_disable_device(pdev);
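		/*
		 * A return of -5 means pci_enable_device() or
		 * pci_request_selected_regions() failed inside
		 * mpt_do_ioc_recovery(), so there are no regions to release.
		 */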
2022 if (r != -5)
2023 pci_release_selected_regions(pdev, ioc->bars);
2024
2025 destroy_workqueue(ioc->reset_work_q);
2026 ioc->reset_work_q = NULL;
2027
2028 kfree(ioc);
2029 return r;
2030 }
2031
2032 /* call per device driver probe entry point */
	for (cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
		if (MptDeviceDriverHandlers[cb_idx] &&
		    MptDeviceDriverHandlers[cb_idx]->probe) {
			MptDeviceDriverHandlers[cb_idx]->probe(pdev, id);
		}
	}
2039
2040#ifdef CONFIG_PROC_FS
2041 /*
2042 * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
2043 */
2044 dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
2045 if (dent) {
2046 proc_create_single_data("info", S_IRUGO, dent,
2047 mpt_iocinfo_proc_show, ioc);
2048 proc_create_single_data("summary", S_IRUGO, dent,
2049 mpt_summary_proc_show, ioc);
2050 }
2051#endif
2052
2053 if (!ioc->alt_ioc)
2054 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
2055 msecs_to_jiffies(MPT_POLLING_INTERVAL));
2056
2057 return 0;
2058
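/* Error unwind: release everything acquired above, in reverse order. */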
2059out_remove_ioc:
2060 list_del(&ioc->list);
2061 if (ioc->alt_ioc)
2062 ioc->alt_ioc->alt_ioc = NULL;
2063
2064 destroy_workqueue(ioc->reset_work_q);
2065 ioc->reset_work_q = NULL;
2066
2067out_unmap_resources:
2068 iounmap(ioc->memmap);
2069 pci_disable_device(pdev);
2070 pci_release_selected_regions(pdev, ioc->bars);
2071
2072out_free_ioc:
2073 kfree(ioc);
2074
2075 return r;
2076}
2077
2078/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2079/**
2080 * mpt_detach - Remove a PCI intelligent MPT adapter.
2081 * @pdev: Pointer to pci_dev structure
2082 */
2083
2084void
2085mpt_detach(struct pci_dev *pdev)
2086{
2087 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2088 char pname[64];
2089 u8 cb_idx;
2090 unsigned long flags;
2091 struct workqueue_struct *wq;
2092
2093 /*
2094 * Stop polling ioc for fault condition
2095 */
2096 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
2097 wq = ioc->reset_work_q;
2098 ioc->reset_work_q = NULL;
2099 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
2100 cancel_delayed_work(&ioc->fault_reset_work);
2101 destroy_workqueue(wq);
2102
2103 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2104 wq = ioc->fw_event_q;
2105 ioc->fw_event_q = NULL;
2106 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2107 destroy_workqueue(wq);
2108
2109 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
2110 remove_proc_entry(pname, NULL);
2111 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
2112 remove_proc_entry(pname, NULL);
2113 snprintf(pname, sizeof(pname), MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
2114 remove_proc_entry(pname, NULL);
2115
2116 /* call per device driver remove entry point */
	for (cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
		if (MptDeviceDriverHandlers[cb_idx] &&
		    MptDeviceDriverHandlers[cb_idx]->remove) {
			MptDeviceDriverHandlers[cb_idx]->remove(pdev);
		}
	}
2123
2124 /* Disable interrupts! */
2125 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2126
2127 ioc->active = 0;
2128 synchronize_irq(pdev->irq);
2129
2130 /* Clear any lingering interrupt */
2131 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2132
2133 CHIPREG_READ32(&ioc->chip->IntStatus);
2134
2135 mpt_adapter_dispose(ioc);
2136
2137}
2138
2139/**************************************************************************
2140 * Power Management
2141 */
2142#ifdef CONFIG_PM
2143/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2144/**
2145 * mpt_suspend - Fusion MPT base driver suspend routine.
2146 * @pdev: Pointer to pci_dev structure
2147 * @state: new state to enter
2148 */
2149int
2150mpt_suspend(struct pci_dev *pdev, pm_message_t state)
2151{
2152 u32 device_state;
2153 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2154
2155 device_state = pci_choose_state(pdev, state);
2156 printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
2157 "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
2158 device_state);
2159
2160 /* put ioc into READY_STATE */
2161 if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
2162 printk(MYIOC_s_ERR_FMT
2163 "pci-suspend: IOC msg unit reset failed!\n", ioc->name);
2164 }
2165
2166 /* disable interrupts */
2167 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2168 ioc->active = 0;
2169
2170 /* Clear any lingering interrupt */
2171 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2172
2173 free_irq(ioc->pci_irq, ioc);
2174 if (ioc->msi_enable)
2175 pci_disable_msi(ioc->pcidev);
2176 ioc->pci_irq = -1;
2177 pci_save_state(pdev);
2178 pci_disable_device(pdev);
2179 pci_release_selected_regions(pdev, ioc->bars);
2180 pci_set_power_state(pdev, device_state);
2181 return 0;
2182}
2183
2184/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2185/**
2186 * mpt_resume - Fusion MPT base driver resume routine.
2187 * @pdev: Pointer to pci_dev structure
2188 */
2189int
2190mpt_resume(struct pci_dev *pdev)
2191{
2192 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2193 u32 device_state = pdev->current_state;
2194 int recovery_state;
2195 int err;
2196
2197 printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
2198 "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
2199 device_state);
2200
2201 pci_set_power_state(pdev, PCI_D0);
2202 pci_enable_wake(pdev, PCI_D0, 0);
2203 pci_restore_state(pdev);
2204 ioc->pcidev = pdev;
2205 err = mpt_mapresources(ioc);
2206 if (err)
2207 return err;
2208
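	/*
	 * Reinstall the scatter-gather handlers; mpt_mapresources() above
	 * re-evaluates the DMA mask, so the 32-bit vs. 64-bit SGE choice may
	 * have changed.
	 */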
2209 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
2210 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
2211 ioc->add_sge = &mpt_add_sge_64bit_1078;
2212 else
2213 ioc->add_sge = &mpt_add_sge_64bit;
2214 ioc->add_chain = &mpt_add_chain_64bit;
2215 ioc->sg_addr_size = 8;
2216 } else {
2217
2218 ioc->add_sge = &mpt_add_sge;
2219 ioc->add_chain = &mpt_add_chain;
2220 ioc->sg_addr_size = 4;
2221 }
2222 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
2223
2224 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
2225 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
2226 CHIPREG_READ32(&ioc->chip->Doorbell));
2227
2228 /*
2229 * Errata workaround for SAS pci express:
2230 * Upon returning to the D0 state, the contents of the doorbell will be
2231 * stale data, and this will incorrectly signal to the host driver that
2232 * the firmware is ready to process mpt commands. The workaround is
2233 * to issue a diagnostic reset.
2234 */
2235 if (ioc->bus_type == SAS && (pdev->device ==
2236 MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
2237 MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
2238 if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
2239 printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
2240 ioc->name);
2241 goto out;
2242 }
2243 }
2244
2245 /* bring ioc to operational state */
2246 printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
2247 recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
2248 CAN_SLEEP);
2249 if (recovery_state != 0)
2250 printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
2251 "error:[%x]\n", ioc->name, recovery_state);
2252 else
2253 printk(MYIOC_s_INFO_FMT
2254 "pci-resume: success\n", ioc->name);
2255 out:
2256 return 0;
2257
2258}
2259#endif
2260
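/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_signal_reset - Call a registered protocol driver's reset handler
 * @index: Callback index of the registered protocol driver
 * @ioc: Pointer to MPT adapter structure
 * @reset_phase: Reset phase being signalled
 *
 * The bus-specific SPI, FC and SAS drivers are only called when the
 * IOC's bus type matches their driver class; all other registered
 * handlers are always invoked.
 */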
2261static int
2262mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
2263{
2264 if ((MptDriverClass[index] == MPTSPI_DRIVER &&
2265 ioc->bus_type != SPI) ||
2266 (MptDriverClass[index] == MPTFC_DRIVER &&
2267 ioc->bus_type != FC) ||
2268 (MptDriverClass[index] == MPTSAS_DRIVER &&
2269 ioc->bus_type != SAS))
2270 /* make sure we only call the relevant reset handler
2271 * for the bus */
2272 return 0;
2273 return (MptResetHandlers[index])(ioc, reset_phase);
2274}
2275
2276/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2277/**
2278 * mpt_do_ioc_recovery - Initialize or recover MPT adapter.
2279 * @ioc: Pointer to MPT adapter structure
2280 * @reason: Event word / reason
2281 * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
2282 *
2283 * This routine performs all the steps necessary to bring the IOC
 * to an OPERATIONAL state.
2285 *
2286 * This routine also pre-fetches the LAN MAC address of a Fibre Channel
2287 * MPT adapter.
2288 *
2289 * Returns:
2290 * 0 for success
2291 * -1 if failed to get board READY
2292 * -2 if READY but IOCFacts Failed
2293 * -3 if READY but PrimeIOCFifos Failed
2294 * -4 if READY but IOCInit Failed
2295 * -5 if failed to enable_device and/or request_selected_regions
2296 * -6 if failed to upload firmware
2297 */
2298static int
2299mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2300{
2301 int hard_reset_done = 0;
2302 int alt_ioc_ready = 0;
2303 int hard;
2304 int rc=0;
2305 int ii;
2306 int ret = 0;
2307 int reset_alt_ioc_active = 0;
2308 int irq_allocated = 0;
2309 u8 *a;
2310
2311 printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
2312 reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
2313
2314 /* Disable reply interrupts (also blocks FreeQ) */
2315 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2316 ioc->active = 0;
2317
2318 if (ioc->alt_ioc) {
2319 if (ioc->alt_ioc->active ||
2320 reason == MPT_HOSTEVENT_IOC_RECOVER) {
2321 reset_alt_ioc_active = 1;
2322 /* Disable alt-IOC's reply interrupts
2323 * (and FreeQ) for a bit
2324 **/
2325 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2326 0xFFFFFFFF);
2327 ioc->alt_ioc->active = 0;
2328 }
2329 }
2330
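	/*
	 * On a fresh bringup try the soft (message unit) reset path first;
	 * a recovery forces MakeIocReady() down the KickStart/hard-reset path.
	 */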
2331 hard = 1;
2332 if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
2333 hard = 0;
2334
2335 if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
2336 if (hard_reset_done == -4) {
2337 printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
2338 ioc->name);
2339
2340 if (reset_alt_ioc_active && ioc->alt_ioc) {
2341 /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
2342 dprintk(ioc, printk(MYIOC_s_INFO_FMT
2343 "alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
2344 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
2345 ioc->alt_ioc->active = 1;
2346 }
2347
2348 } else {
2349 printk(MYIOC_s_WARN_FMT
2350 "NOT READY WARNING!\n", ioc->name);
2351 }
2352 ret = -1;
2353 goto out;
2354 }
2355
2356 /* hard_reset_done = 0 if a soft reset was performed
2357 * and 1 if a hard reset was performed.
2358 */
2359 if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
2360 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
2361 alt_ioc_ready = 1;
2362 else
2363 printk(MYIOC_s_WARN_FMT
2364 ": alt-ioc Not ready WARNING!\n",
2365 ioc->alt_ioc->name);
2366 }
2367
2368 for (ii=0; ii<5; ii++) {
2369 /* Get IOC facts! Allow 5 retries */
2370 if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
2371 break;
2372 }
2373
2374
2375 if (ii == 5) {
2376 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2377 "Retry IocFacts failed rc=%x\n", ioc->name, rc));
2378 ret = -2;
2379 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2380 MptDisplayIocCapabilities(ioc);
2381 }
2382
2383 if (alt_ioc_ready) {
2384 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
2385 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2386 "Initial Alt IocFacts failed rc=%x\n",
2387 ioc->name, rc));
2388 /* Retry - alt IOC was initialized once
2389 */
2390 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
2391 }
2392 if (rc) {
2393 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2394 "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
2395 alt_ioc_ready = 0;
2396 reset_alt_ioc_active = 0;
2397 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2398 MptDisplayIocCapabilities(ioc->alt_ioc);
2399 }
2400 }
2401
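	/*
	 * Firmware downloadboot needs programmed I/O, so re-request the PCI
	 * regions with the I/O BARs included.
	 */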
2402 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
2403 (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
2404 pci_release_selected_regions(ioc->pcidev, ioc->bars);
2405 ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
2406 IORESOURCE_IO);
2407 if (pci_enable_device(ioc->pcidev))
2408 return -5;
2409 if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
2410 "mpt"))
2411 return -5;
2412 }
2413
2414 /*
2415 * Device is reset now. It must have de-asserted the interrupt line
2416 * (if it was asserted) and it should be safe to register for the
2417 * interrupt now.
2418 */
2419 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
2420 ioc->pci_irq = -1;
2421 if (ioc->pcidev->irq) {
2422 if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
2423 printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
2424 ioc->name);
2425 else
2426 ioc->msi_enable = 0;
2427 rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
2428 IRQF_SHARED, ioc->name, ioc);
2429 if (rc < 0) {
2430 printk(MYIOC_s_ERR_FMT "Unable to allocate "
2431 "interrupt %d!\n",
2432 ioc->name, ioc->pcidev->irq);
2433 if (ioc->msi_enable)
2434 pci_disable_msi(ioc->pcidev);
2435 ret = -EBUSY;
2436 goto out;
2437 }
2438 irq_allocated = 1;
2439 ioc->pci_irq = ioc->pcidev->irq;
2440 pci_set_master(ioc->pcidev); /* ?? */
2441 pci_set_drvdata(ioc->pcidev, ioc);
2442 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2443 "installed at interrupt %d\n", ioc->name,
2444 ioc->pcidev->irq));
2445 }
2446 }
2447
2448 /* Prime reply & request queues!
2449 * (mucho alloc's) Must be done prior to
2450 * init as upper addresses are needed for init.
2451 * If fails, continue with alt-ioc processing
2452 */
2453 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
2454 ioc->name));
2455 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
2456 ret = -3;
2457
2458 /* May need to check/upload firmware & data here!
2459 * If fails, continue with alt-ioc processing
2460 */
2461 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
2462 ioc->name));
2463 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
2464 ret = -4;
2466 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
2467 printk(MYIOC_s_WARN_FMT
2468 ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
2469 ioc->alt_ioc->name, rc);
2470 alt_ioc_ready = 0;
2471 reset_alt_ioc_active = 0;
2472 }
2473
2474 if (alt_ioc_ready) {
2475 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
2476 alt_ioc_ready = 0;
2477 reset_alt_ioc_active = 0;
2478 printk(MYIOC_s_WARN_FMT
2479 ": alt-ioc: (%d) init failure WARNING!\n",
2480 ioc->alt_ioc->name, rc);
2481 }
2482 }
2483
2484 if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
2485 if (ioc->upload_fw) {
2486 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2487 "firmware upload required!\n", ioc->name));
2488
2489 /* Controller is not operational, cannot do upload
2490 */
2491 if (ret == 0) {
2492 rc = mpt_do_upload(ioc, sleepFlag);
2493 if (rc == 0) {
2494 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2495 /*
2496 * Maintain only one pointer to FW memory
						 * so there will not be two attempts to
2498 * downloadboot onboard dual function
2499 * chips (mpt_adapter_disable,
2500 * mpt_diag_reset)
2501 */
2502 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2503 "mpt_upload: alt_%s has cached_fw=%p \n",
2504 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
2505 ioc->cached_fw = NULL;
2506 }
2507 } else {
2508 printk(MYIOC_s_WARN_FMT
2509 "firmware upload failure!\n", ioc->name);
2510 ret = -6;
2511 }
2512 }
2513 }
2514 }
2515
2516 /* Enable MPT base driver management of EventNotification
2517 * and EventAck handling.
2518 */
2519 if ((ret == 0) && (!ioc->facts.EventState)) {
2520 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2521 "SendEventNotification\n",
2522 ioc->name));
2523 ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
2524 }
2525
2526 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2527 rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
2528
2529 if (ret == 0) {
2530 /* Enable! (reply interrupt) */
2531 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
2532 ioc->active = 1;
2533 }
2534 if (rc == 0) { /* alt ioc */
2535 if (reset_alt_ioc_active && ioc->alt_ioc) {
2536 /* (re)Enable alt-IOC! (reply interrupt) */
			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				"alt-ioc reply irq re-enabled\n",
2539 ioc->alt_ioc->name));
2540 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2541 MPI_HIM_DIM);
2542 ioc->alt_ioc->active = 1;
2543 }
2544 }
2545
2546
2547 /* Add additional "reason" check before call to GetLanConfigPages
2548 * (combined with GetIoUnitPage2 call). This prevents a somewhat
2549 * recursive scenario; GetLanConfigPages times out, timer expired
2550 * routine calls HardResetHandler, which calls into here again,
2551 * and we try GetLanConfigPages again...
2552 */
2553 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
2554
2555 /*
2556 * Initialize link list for inactive raid volumes.
2557 */
2558 mutex_init(&ioc->raid_data.inactive_list_mutex);
2559 INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
2560
2561 switch (ioc->bus_type) {
2562
2563 case SAS:
2564 /* clear persistency table */
2565 if(ioc->facts.IOCExceptions &
2566 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
2567 ret = mptbase_sas_persist_operation(ioc,
2568 MPI_SAS_OP_CLEAR_NOT_PRESENT);
2569 if(ret != 0)
2570 goto out;
2571 }
2572
2573 /* Find IM volumes
2574 */
2575 mpt_findImVolumes(ioc);
2576
2577 /* Check, and possibly reset, the coalescing value
2578 */
2579 mpt_read_ioc_pg_1(ioc);
2580
2581 break;
2582
2583 case FC:
2584 if ((ioc->pfacts[0].ProtocolFlags &
2585 MPI_PORTFACTS_PROTOCOL_LAN) &&
2586 (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
2587 /*
2588 * Pre-fetch the ports LAN MAC address!
2589 * (LANPage1_t stuff)
2590 */
2591 (void) GetLanConfigPages(ioc);
2592 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
2593 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2594 "LanAddr = %pMR\n", ioc->name, a));
2595 }
2596 break;
2597
2598 case SPI:
2599 /* Get NVRAM and adapter maximums from SPP 0 and 2
2600 */
2601 mpt_GetScsiPortSettings(ioc, 0);
2602
2603 /* Get version and length of SDP 1
2604 */
2605 mpt_readScsiDevicePageHeaders(ioc, 0);
2606
2607 /* Find IM volumes
2608 */
2609 if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
2610 mpt_findImVolumes(ioc);
2611
2612 /* Check, and possibly reset, the coalescing value
2613 */
2614 mpt_read_ioc_pg_1(ioc);
2615
2616 mpt_read_ioc_pg_4(ioc);
2617
2618 break;
2619 }
2620
2621 GetIoUnitPage2(ioc);
2622 mpt_get_manufacturing_pg_0(ioc);
2623 }
2624
2625 out:
2626 if ((ret != 0) && irq_allocated) {
2627 free_irq(ioc->pci_irq, ioc);
2628 if (ioc->msi_enable)
2629 pci_disable_msi(ioc->pcidev);
2630 }
2631 return ret;
2632}
2633
2634/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2635/**
2636 * mpt_detect_bound_ports - Search for matching PCI bus/dev_function
2637 * @ioc: Pointer to MPT adapter structure
2638 * @pdev: Pointer to (struct pci_dev) structure
2639 *
2640 * Search for PCI bus/dev_function which matches
2641 * PCI bus/dev_function (+/-1) for newly discovered 929,
2642 * 929X, 1030 or 1035.
2643 *
2644 * If match on PCI dev_function +/-1 is found, bind the two MPT adapters
2645 * using alt_ioc pointer fields in their %MPT_ADAPTER structures.
2646 */
2647static void
2648mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
2649{
2650 struct pci_dev *peer=NULL;
2651 unsigned int slot = PCI_SLOT(pdev->devfn);
2652 unsigned int func = PCI_FUNC(pdev->devfn);
2653 MPT_ADAPTER *ioc_srch;
2654
2655 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
2656 " searching for devfn match on %x or %x\n",
2657 ioc->name, pci_name(pdev), pdev->bus->number,
2658 pdev->devfn, func-1, func+1));
2659
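	/*
	 * The two halves of a dual-function chip sit on adjacent PCI
	 * functions, so probe one function below and one above this device.
	 */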
2660 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
2661 if (!peer) {
2662 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
2663 if (!peer)
2664 return;
2665 }
2666
2667 list_for_each_entry(ioc_srch, &ioc_list, list) {
2668 struct pci_dev *_pcidev = ioc_srch->pcidev;
2669 if (_pcidev == peer) {
2670 /* Paranoia checks */
2671 if (ioc->alt_ioc != NULL) {
2672 printk(MYIOC_s_WARN_FMT
2673 "Oops, already bound (%s <==> %s)!\n",
2674 ioc->name, ioc->name, ioc->alt_ioc->name);
2675 break;
2676 } else if (ioc_srch->alt_ioc != NULL) {
2677 printk(MYIOC_s_WARN_FMT
2678 "Oops, already bound (%s <==> %s)!\n",
2679 ioc_srch->name, ioc_srch->name,
2680 ioc_srch->alt_ioc->name);
2681 break;
2682 }
2683 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2684 "FOUND! binding %s <==> %s\n",
2685 ioc->name, ioc->name, ioc_srch->name));
2686 ioc_srch->alt_ioc = ioc;
2687 ioc->alt_ioc = ioc_srch;
2688 }
2689 }
2690 pci_dev_put(peer);
2691}
2692
2693/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2694/**
2695 * mpt_adapter_disable - Disable misbehaving MPT adapter.
2696 * @ioc: Pointer to MPT adapter structure
2697 */
2698static void
2699mpt_adapter_disable(MPT_ADAPTER *ioc)
2700{
2701 int sz;
2702 int ret;
2703
2704 if (ioc->cached_fw != NULL) {
2705 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: Pushing FW onto adapter\n", ioc->name, __func__));
2707 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2708 ioc->cached_fw, CAN_SLEEP)) < 0) {
2709 printk(MYIOC_s_WARN_FMT
2710 ": firmware downloadboot failure (%d)!\n",
2711 ioc->name, ret);
2712 }
2713 }
2714
2715 /*
	 * Put the controller into ready state (if it's not already)
2717 */
2718 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
2719 if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
2720 CAN_SLEEP)) {
2721 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
2722 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
2723 "reset failed to put ioc in ready state!\n",
2724 ioc->name, __func__);
2725 } else
2726 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
2727 "failed!\n", ioc->name, __func__);
2728 }
2729
2730
2731 /* Disable adapter interrupts! */
2732 synchronize_irq(ioc->pcidev->irq);
2733 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2734 ioc->active = 0;
2735
2736 /* Clear any lingering interrupt */
2737 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2738 CHIPREG_READ32(&ioc->chip->IntStatus);
2739
2740 if (ioc->alloc != NULL) {
2741 sz = ioc->alloc_sz;
2742 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
2743 ioc->name, ioc->alloc, ioc->alloc_sz));
2744 pci_free_consistent(ioc->pcidev, sz,
2745 ioc->alloc, ioc->alloc_dma);
2746 ioc->reply_frames = NULL;
2747 ioc->req_frames = NULL;
2748 ioc->alloc = NULL;
2749 ioc->alloc_total -= sz;
2750 }
2751
2752 if (ioc->sense_buf_pool != NULL) {
2753 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
2754 pci_free_consistent(ioc->pcidev, sz,
2755 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
2756 ioc->sense_buf_pool = NULL;
2757 ioc->alloc_total -= sz;
2758 }
2759
2760 if (ioc->events != NULL){
2761 sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
2762 kfree(ioc->events);
2763 ioc->events = NULL;
2764 ioc->alloc_total -= sz;
2765 }
2766
2767 mpt_free_fw_memory(ioc);
2768
2769 kfree(ioc->spi_data.nvram);
2770 mpt_inactive_raid_list_free(ioc);
2771 kfree(ioc->raid_data.pIocPg2);
2772 kfree(ioc->raid_data.pIocPg3);
2773 ioc->spi_data.nvram = NULL;
2774 ioc->raid_data.pIocPg3 = NULL;
2775
2776 if (ioc->spi_data.pIocPg4 != NULL) {
2777 sz = ioc->spi_data.IocPg4Sz;
2778 pci_free_consistent(ioc->pcidev, sz,
2779 ioc->spi_data.pIocPg4,
2780 ioc->spi_data.IocPg4_dma);
2781 ioc->spi_data.pIocPg4 = NULL;
2782 ioc->alloc_total -= sz;
2783 }
2784
2785 if (ioc->ReqToChain != NULL) {
2786 kfree(ioc->ReqToChain);
2787 kfree(ioc->RequestNB);
2788 ioc->ReqToChain = NULL;
2789 }
2790
2791 kfree(ioc->ChainToChain);
2792 ioc->ChainToChain = NULL;
2793
2794 if (ioc->HostPageBuffer != NULL) {
2795 if((ret = mpt_host_page_access_control(ioc,
2796 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2797 printk(MYIOC_s_ERR_FMT
2798 ": %s: host page buffers free failed (%d)!\n",
2799 ioc->name, __func__, ret);
2800 }
2801 dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2802 "HostPageBuffer free @ %p, sz=%d bytes\n",
2803 ioc->name, ioc->HostPageBuffer,
2804 ioc->HostPageBuffer_sz));
2805 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2806 ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
		ioc->HostPageBuffer = NULL;
		ioc->alloc_total -= ioc->HostPageBuffer_sz;
		ioc->HostPageBuffer_sz = 0;
2810 }
2811
2812 pci_set_drvdata(ioc->pcidev, NULL);
2813}
2814/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2815/**
2816 * mpt_adapter_dispose - Free all resources associated with an MPT adapter
2817 * @ioc: Pointer to MPT adapter structure
2818 *
2819 * This routine unregisters h/w resources and frees all alloc'd memory
 * associated with an MPT adapter structure.
2821 */
2822static void
2823mpt_adapter_dispose(MPT_ADAPTER *ioc)
2824{
2825 int sz_first, sz_last;
2826
2827 if (ioc == NULL)
2828 return;
2829
2830 sz_first = ioc->alloc_total;
2831
2832 mpt_adapter_disable(ioc);
2833
2834 if (ioc->pci_irq != -1) {
2835 free_irq(ioc->pci_irq, ioc);
2836 if (ioc->msi_enable)
2837 pci_disable_msi(ioc->pcidev);
2838 ioc->pci_irq = -1;
2839 }
2840
2841 if (ioc->memmap != NULL) {
2842 iounmap(ioc->memmap);
2843 ioc->memmap = NULL;
2844 }
2845
2846 pci_disable_device(ioc->pcidev);
2847 pci_release_selected_regions(ioc->pcidev, ioc->bars);
2848
2849 /* Zap the adapter lookup ptr! */
2850 list_del(&ioc->list);
2851
2852 sz_last = ioc->alloc_total;
2853 dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
2854 ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
2855
2856 if (ioc->alt_ioc)
2857 ioc->alt_ioc->alt_ioc = NULL;
2858
2859 kfree(ioc);
2860}
2861
2862/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2863/**
 * MptDisplayIocCapabilities - Display IOC's capabilities.
2865 * @ioc: Pointer to MPT adapter structure
2866 */
2867static void
2868MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
2869{
2870 int i = 0;
2871
2872 printk(KERN_INFO "%s: ", ioc->name);
2873 if (ioc->prod_name)
2874 pr_cont("%s: ", ioc->prod_name);
2875 pr_cont("Capabilities={");
2876
2877 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2878 pr_cont("Initiator");
2879 i++;
2880 }
2881
2882 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2883 pr_cont("%sTarget", i ? "," : "");
2884 i++;
2885 }
2886
2887 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
2888 pr_cont("%sLAN", i ? "," : "");
2889 i++;
2890 }
2891
2892#if 0
2893 /*
2894 * This would probably evoke more questions than it's worth
2895 */
2896 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2897 pr_cont("%sLogBusAddr", i ? "," : "");
2898 i++;
2899 }
2900#endif
2901
2902 pr_cont("}\n");
2903}
2904
2905/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2906/**
2907 * MakeIocReady - Get IOC to a READY state, using KickStart if needed.
2908 * @ioc: Pointer to MPT_ADAPTER structure
2909 * @force: Force hard KickStart of IOC
2910 * @sleepFlag: Specifies whether the process can sleep
2911 *
2912 * Returns:
2913 * 1 - DIAG reset and READY
2914 * 0 - READY initially OR soft reset and READY
2915 * -1 - Any failure on KickStart
2916 * -2 - Msg Unit Reset Failed
2917 * -3 - IO Unit Reset Failed
2918 * -4 - IOC owned by a PEER
2919 */
2920static int
2921MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2922{
2923 u32 ioc_state;
2924 int statefault = 0;
2925 int cntdn;
2926 int hard_reset_done = 0;
2927 int r;
2928 int ii;
2929 int whoinit;
2930
2931 /* Get current [raw] IOC state */
2932 ioc_state = mpt_GetIocState(ioc, 0);
2933 dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));
2934
2935 /*
2936 * Check to see if IOC got left/stuck in doorbell handshake
2937 * grip of death. If so, hard reset the IOC.
2938 */
2939 if (ioc_state & MPI_DOORBELL_ACTIVE) {
2940 statefault = 1;
2941 printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
2942 ioc->name);
2943 }
2944
2945 /* Is it already READY? */
2946 if (!statefault &&
2947 ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
2948 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2949 "IOC is in READY state\n", ioc->name));
2950 return 0;
2951 }
2952
2953 /*
2954 * Check to see if IOC is in FAULT state.
2955 */
2956 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
2957 statefault = 2;
2958 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
2959 ioc->name);
2960 printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
2961 ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
2962 }
2963
2964 /*
2965 * Hmmm... Did it get left operational?
2966 */
2967 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
2968 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
2969 ioc->name));
2970
2971 /* Check WhoInit.
2972 * If PCI Peer, exit.
2973 * Else, if no fault conditions are present, issue a MessageUnitReset
2974 * Else, fall through to KickStart case
2975 */
2976 whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
2977 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2978 "whoinit 0x%x statefault %d force %d\n",
2979 ioc->name, whoinit, statefault, force));
2980 if (whoinit == MPI_WHOINIT_PCI_PEER)
2981 return -4;
2982 else {
2983 if ((statefault == 0 ) && (force == 0)) {
2984 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
2985 return 0;
2986 }
2987 statefault = 3;
2988 }
2989 }
2990
2991 hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
2992 if (hard_reset_done < 0)
2993 return -1;
2994
2995 /*
2996 * Loop here waiting for IOC to come READY.
2997 */
2998 ii = 0;
2999 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
3000
3001 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
3002 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
3003 /*
3004 * BIOS or previous driver load left IOC in OP state.
3005 * Reset messaging FIFOs.
3006 */
3007 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
3008 printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
3009 return -2;
3010 }
3011 } else if (ioc_state == MPI_IOC_STATE_RESET) {
3012 /*
3013 * Something is wrong. Try to get IOC back
3014 * to a known state.
3015 */
3016 if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
3017 printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
3018 return -3;
3019 }
3020 }
3021
3022 ii++; cntdn--;
3023 if (!cntdn) {
3024 printk(MYIOC_s_ERR_FMT
3025 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
3026 ioc->name, ioc_state, (int)((ii+5)/HZ));
3027 return -ETIME;
3028 }
3029
3030 if (sleepFlag == CAN_SLEEP) {
3031 msleep(1);
3032 } else {
3033 mdelay (1); /* 1 msec delay */
3034 }
3035
3036 }
3037
3038 if (statefault < 3) {
3039 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
3040 statefault == 1 ? "stuck handshake" : "IOC FAULT");
3041 }
3042
3043 return hard_reset_done;
3044}
3045
3046/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3047/**
 * mpt_GetIocState - Get the current state of an MPT adapter.
3049 * @ioc: Pointer to MPT_ADAPTER structure
3050 * @cooked: Request raw or cooked IOC state
3051 *
3052 * Returns all IOC Doorbell register bits if cooked==0, else just the
3053 * Doorbell bits in MPI_IOC_STATE_MASK.
3054 */
3055u32
3056mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
3057{
3058 u32 s, sc;
3059
3060 /* Get! */
3061 s = CHIPREG_READ32(&ioc->chip->Doorbell);
3062 sc = s & MPI_IOC_STATE_MASK;
3063
3064 /* Save! */
3065 ioc->last_state = sc;
3066
3067 return cooked ? sc : s;
3068}
3069
3070/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3071/**
3072 * GetIocFacts - Send IOCFacts request to MPT adapter.
3073 * @ioc: Pointer to MPT_ADAPTER structure
3074 * @sleepFlag: Specifies whether the process can sleep
3075 * @reason: If recovery, only update facts.
3076 *
3077 * Returns 0 for success, non-zero for failure.
3078 */
3079static int
3080GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
3081{
3082 IOCFacts_t get_facts;
3083 IOCFactsReply_t *facts;
3084 int r;
3085 int req_sz;
3086 int reply_sz;
3087 int sz;
3088 u32 status, vv;
3089 u8 shiftFactor=1;
3090
3091 /* IOC *must* NOT be in RESET state! */
3092 if (ioc->last_state == MPI_IOC_STATE_RESET) {
3093 printk(KERN_ERR MYNAM
3094 ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
3095 ioc->name, ioc->last_state);
3096 return -44;
3097 }
3098
3099 facts = &ioc->facts;
3100
3101 /* Destination (reply area)... */
3102 reply_sz = sizeof(*facts);
3103 memset(facts, 0, reply_sz);
3104
3105 /* Request area (get_facts on the stack right now!) */
3106 req_sz = sizeof(get_facts);
3107 memset(&get_facts, 0, req_sz);
3108
3109 get_facts.Function = MPI_FUNCTION_IOC_FACTS;
3110 /* Assert: All other get_facts fields are zero! */
3111
3112 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3113 "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
3114 ioc->name, req_sz, reply_sz));
3115
3116 /* No non-zero fields in the get_facts request are greater than
3117 * 1 byte in size, so we can just fire it off as is.
3118 */
3119 r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
3120 reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
3121 if (r != 0)
3122 return r;
3123
3124 /*
3125 * Now byte swap (GRRR) the necessary fields before any further
3126 * inspection of reply contents.
3127 *
3128 * But need to do some sanity checks on MsgLength (byte) field
3129 * to make sure we don't zero IOC's req_sz!
3130 */
3131 /* Did we get a valid reply? */
3132 if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
3133 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
3134 /*
3135 * If not been here, done that, save off first WhoInit value
3136 */
3137 if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
3138 ioc->FirstWhoInit = facts->WhoInit;
3139 }
3140
3141 facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
3142 facts->MsgContext = le32_to_cpu(facts->MsgContext);
3143 facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
3144 facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
3145 facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
3146 status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
3147 /* CHECKME! IOCStatus, IOCLogInfo */
3148
3149 facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
3150 facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
3151
3152 /*
3153 * FC f/w version changed between 1.1 and 1.2
3154 * Old: u16{Major(4),Minor(4),SubMinor(8)}
3155 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
3156 */
3157 if (facts->MsgVersion < MPI_VERSION_01_02) {
3158 /*
3159 * Handle old FC f/w style, convert to new...
3160 */
3161 u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
3162 facts->FWVersion.Word =
3163 ((oldv<<12) & 0xFF000000) |
3164 ((oldv<<8) & 0x000FFF00);
3165 } else
3166 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
3167
3168 facts->ProductID = le16_to_cpu(facts->ProductID);
3169
3170 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
3171 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
3172 ioc->ir_firmware = 1;
3173
3174 facts->CurrentHostMfaHighAddr =
3175 le32_to_cpu(facts->CurrentHostMfaHighAddr);
3176 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
3177 facts->CurrentSenseBufferHighAddr =
3178 le32_to_cpu(facts->CurrentSenseBufferHighAddr);
3179 facts->CurReplyFrameSize =
3180 le16_to_cpu(facts->CurReplyFrameSize);
3181 facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
3182
3183 /*
3184 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
3185 * Older MPI-1.00.xx struct had 13 dwords, and enlarged
3186 * to 14 in MPI-1.01.0x.
3187 */
3188 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
3189 facts->MsgVersion > MPI_VERSION_01_00) {
3190 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
3191 }
3192
3193 facts->FWImageSize = ALIGN(facts->FWImageSize, 4);
3194
3195 if (!facts->RequestFrameSize) {
3196 /* Something is wrong! */
3197 printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
3198 ioc->name);
3199 return -55;
3200 }
3201
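		/*
		 * BlockSize is reported in 32-bit words; derive how many such
		 * blocks cover a 64-byte frame and a shift factor (roughly
		 * log2(BlockSize) + 1) used later when sizing per-request NB
		 * values.
		 */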
3202 r = sz = facts->BlockSize;
3203 vv = ((63 / (sz * 4)) + 1) & 0x03;
3204 ioc->NB_for_64_byte_frame = vv;
		while (sz) {
			shiftFactor++;
			sz = sz >> 1;
		}
3210 ioc->NBShiftFactor = shiftFactor;
3211 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3212 "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
3213 ioc->name, vv, shiftFactor, r));
3214
3215 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
3216 /*
3217 * Set values for this IOC's request & reply frame sizes,
3218 * and request & reply queue depths...
3219 */
3220 ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
3221 ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
3222 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
3223 ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
3224
3225 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n",
3226 ioc->name, ioc->reply_sz, ioc->reply_depth));
3227 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n",
3228 ioc->name, ioc->req_sz, ioc->req_depth));
3229
3230 /* Get port facts! */
3231 if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
3232 return r;
3233 }
3234 } else {
3235 printk(MYIOC_s_ERR_FMT
3236 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
3237 ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
3238 RequestFrameSize)/sizeof(u32)));
3239 return -66;
3240 }
3241
3242 return 0;
3243}
3244
3245/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3246/**
3247 * GetPortFacts - Send PortFacts request to MPT adapter.
3248 * @ioc: Pointer to MPT_ADAPTER structure
3249 * @portnum: Port number
3250 * @sleepFlag: Specifies whether the process can sleep
3251 *
3252 * Returns 0 for success, non-zero for failure.
3253 */
3254static int
3255GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
3256{
3257 PortFacts_t get_pfacts;
3258 PortFactsReply_t *pfacts;
3259 int ii;
3260 int req_sz;
3261 int reply_sz;
3262 int max_id;
3263
3264 /* IOC *must* NOT be in RESET state! */
3265 if (ioc->last_state == MPI_IOC_STATE_RESET) {
3266 printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n",
3267 ioc->name, ioc->last_state );
3268 return -4;
3269 }
3270
3271 pfacts = &ioc->pfacts[portnum];
3272
3273 /* Destination (reply area)... */
3274 reply_sz = sizeof(*pfacts);
3275 memset(pfacts, 0, reply_sz);
3276
3277 /* Request area (get_pfacts on the stack right now!) */
3278 req_sz = sizeof(get_pfacts);
3279 memset(&get_pfacts, 0, req_sz);
3280
3281 get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
3282 get_pfacts.PortNumber = portnum;
3283 /* Assert: All other get_pfacts fields are zero! */
3284
3285 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n",
3286 ioc->name, portnum));
3287
3288 /* No non-zero fields in the get_pfacts request are greater than
3289 * 1 byte in size, so we can just fire it off as is.
3290 */
3291 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
3292 reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
3293 if (ii != 0)
3294 return ii;
3295
3296 /* Did we get a valid reply? */
3297
3298 /* Now byte swap the necessary fields in the response. */
3299 pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
3300 pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
3301 pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
3302 pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
3303 pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
3304 pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
3305 pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
3306 pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
3307 pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
3308
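	/*
	 * On SAS the reported PortSCSIID is used as the device limit,
	 * otherwise MaxDevices is; devices are spread over multiple virtual
	 * buses when the limit exceeds 256 per bus.
	 */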
3309 max_id = (ioc->bus_type == SAS) ? pfacts->PortSCSIID :
3310 pfacts->MaxDevices;
3311 ioc->devices_per_bus = (max_id > 255) ? 256 : max_id;
3312 ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256;
3313
3314 /*
3315 * Place all the devices on channels
3316 *
	 * (for debugging)
3318 */
3319 if (mpt_channel_mapping) {
3320 ioc->devices_per_bus = 1;
3321 ioc->number_of_buses = (max_id > 255) ? 255 : max_id;
3322 }
3323
3324 return 0;
3325}
3326
3327/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3328/**
3329 * SendIocInit - Send IOCInit request to MPT adapter.
3330 * @ioc: Pointer to MPT_ADAPTER structure
3331 * @sleepFlag: Specifies whether the process can sleep
3332 *
3333 * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
3334 *
3335 * Returns 0 for success, non-zero for failure.
3336 */
3337static int
3338SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3339{
3340 IOCInit_t ioc_init;
3341 MPIDefaultReply_t init_reply;
3342 u32 state;
3343 int r;
3344 int count;
3345 int cntdn;
3346
3347 memset(&ioc_init, 0, sizeof(ioc_init));
3348 memset(&init_reply, 0, sizeof(init_reply));
3349
3350 ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
3351 ioc_init.Function = MPI_FUNCTION_IOC_INIT;
3352
3353 /* If we are in a recovery mode and we uploaded the FW image,
3354 * then this pointer is not NULL. Skip the upload a second time.
3355 * Set this flag if cached_fw set for either IOC.
3356 */
3357 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
3358 ioc->upload_fw = 1;
3359 else
3360 ioc->upload_fw = 0;
3361 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n",
3362 ioc->name, ioc->upload_fw, ioc->facts.Flags));
3363
3364 ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3365 ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3366
3367 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3368 ioc->name, ioc->facts.MsgVersion));
3369 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
3370 // set MsgVersion and HeaderVersion host driver was built with
3371 ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
3372 ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
3373
3374 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
3375 ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
3376 } else if(mpt_host_page_alloc(ioc, &ioc_init))
3377 return -99;
3378 }
3379 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3380
3381 if (ioc->sg_addr_size == sizeof(u64)) {
3382 /* Save the upper 32-bits of the request
3383 * (reply) and sense buffers.
3384 */
3385 ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
3386 ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
3387 } else {
3388 /* Force 32-bit addressing */
3389 ioc_init.HostMfaHighAddr = cpu_to_le32(0);
3390 ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
3391 }
3392
3393 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
3394 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
3395 ioc->facts.MaxDevices = ioc_init.MaxDevices;
3396 ioc->facts.MaxBuses = ioc_init.MaxBuses;
3397
3398 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n",
3399 ioc->name, &ioc_init));
3400
3401 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
3402 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
3403 if (r != 0) {
3404 printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
3405 return r;
3406 }
3407
3408 /* No need to byte swap the multibyte fields in the reply
3409 * since we don't even look at its contents.
3410 */
3411
3412 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n",
3413 ioc->name, &ioc_init));
3414
3415 if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
3416 printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
3417 return r;
3418 }
3419
3420 /* YIKES! SUPER IMPORTANT!!!
3421 * Poll IocState until _OPERATIONAL while IOC is doing
3422 * LoopInit and TargetDiscovery!
3423 */
3424 count = 0;
3425 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */
3426 state = mpt_GetIocState(ioc, 1);
	while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
		if (sleepFlag == CAN_SLEEP) {
			msleep(1);
		} else {
			mdelay(1);
		}

		state = mpt_GetIocState(ioc, 1);
		count++;
	}
	/* Report a timeout if the IOC never reached OPERATIONAL. */
	if (state != MPI_IOC_STATE_OPERATIONAL) {
		printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
		    ioc->name, (int)((count+5)/HZ));
		return -9;
	}
3443 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n",
3444 ioc->name, count));
3445
3446 ioc->aen_event_read_flag=0;
3447 return r;
3448}
3449
3450/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3451/**
3452 * SendPortEnable - Send PortEnable request to MPT adapter port.
3453 * @ioc: Pointer to MPT_ADAPTER structure
3454 * @portnum: Port number to enable
3455 * @sleepFlag: Specifies whether the process can sleep
3456 *
3457 * Send PortEnable to bring IOC to OPERATIONAL state.
3458 *
3459 * Returns 0 for success, non-zero for failure.
3460 */
3461static int
3462SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
3463{
3464 PortEnable_t port_enable;
3465 MPIDefaultReply_t reply_buf;
3466 int rc;
3467 int req_sz;
3468 int reply_sz;
3469
3470 /* Destination... */
3471 reply_sz = sizeof(MPIDefaultReply_t);
3472 memset(&reply_buf, 0, reply_sz);
3473
3474 req_sz = sizeof(PortEnable_t);
3475 memset(&port_enable, 0, req_sz);
3476
3477 port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
3478 port_enable.PortNumber = portnum;
3479/* port_enable.ChainOffset = 0; */
3480/* port_enable.MsgFlags = 0; */
3481/* port_enable.MsgContext = 0; */
3482
3483 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n",
3484 ioc->name, portnum, &port_enable));
3485
3486 /* RAID FW may take a long time to enable
3487 */
3488 if (ioc->ir_firmware || ioc->bus_type == SAS) {
3489 rc = mpt_handshake_req_reply_wait(ioc, req_sz,
3490 (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
3491 300 /*seconds*/, sleepFlag);
3492 } else {
3493 rc = mpt_handshake_req_reply_wait(ioc, req_sz,
3494 (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
3495 30 /*seconds*/, sleepFlag);
3496 }
3497 return rc;
3498}
3499
3500/**
3501 * mpt_alloc_fw_memory - allocate firmware memory
3502 * @ioc: Pointer to MPT_ADAPTER structure
3503 * @size: total FW bytes
3504 *
3505 * If memory has already been allocated, the same (cached) value
3506 * is returned.
3507 *
3508 * Return 0 if successful, or non-zero for failure
3509 **/
3510int
3511mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
3512{
3513 int rc;
3514
3515 if (ioc->cached_fw) {
3516 rc = 0; /* use already allocated memory */
3517 goto out;
3518 }
3519 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
3520 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
3521 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
3522 rc = 0;
3523 goto out;
3524 }
3525 ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
3526 if (!ioc->cached_fw) {
3527 printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
3528 ioc->name);
3529 rc = -1;
3530 } else {
3531 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
3532 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
3533 ioc->alloc_total += size;
3534 rc = 0;
3535 }
3536 out:
3537 return rc;
3538}
3539
3540/**
3541 * mpt_free_fw_memory - free firmware memory
3542 * @ioc: Pointer to MPT_ADAPTER structure
3543 *
 * Frees the DMA-coherent buffer holding the cached firmware image,
 * if one was allocated, and clears ioc->cached_fw.
3546 **/
3547void
3548mpt_free_fw_memory(MPT_ADAPTER *ioc)
3549{
3550 int sz;
3551
3552 if (!ioc->cached_fw)
3553 return;
3554
3555 sz = ioc->facts.FWImageSize;
3556 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
3557 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
3558 pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
3559 ioc->alloc_total -= sz;
3560 ioc->cached_fw = NULL;
3561}
3562
3563/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3564/**
3565 * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
3566 * @ioc: Pointer to MPT_ADAPTER structure
3567 * @sleepFlag: Specifies whether the process can sleep
3568 *
3569 * Returns 0 for success, >0 for handshake failure
3570 * <0 for fw upload failure.
3571 *
 * Remark: If this IOC is bound to another IOC and a successful FWUpload
 * was already performed on the bound IOC, the second image is discarded
 * and its memory is freed. Both channels must still upload to prevent the
 * IOC from running in degraded mode.
3576 */
3577static int
3578mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3579{
3580 u8 reply[sizeof(FWUploadReply_t)];
3581 FWUpload_t *prequest;
3582 FWUploadReply_t *preply;
3583 FWUploadTCSGE_t *ptcsge;
3584 u32 flagsLength;
3585 int ii, sz, reply_sz;
3586 int cmdStatus;
3587 int request_size;
3588 /* If the image size is 0, we are done.
3589 */
3590 if ((sz = ioc->facts.FWImageSize) == 0)
3591 return 0;
3592
3593 if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
3594 return -ENOMEM;
3595
3596 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
3597 ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
3598
3599 prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
3600 kzalloc(ioc->req_sz, GFP_KERNEL);
3601 if (!prequest) {
3602 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed "
3603 "while allocating memory \n", ioc->name));
3604 mpt_free_fw_memory(ioc);
3605 return -ENOMEM;
3606 }
3607
3608 preply = (FWUploadReply_t *)&reply;
3609
3610 reply_sz = sizeof(reply);
3611 memset(preply, 0, reply_sz);
3612
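	/*
	 * The request carries a transaction-context SGE describing the image
	 * size, immediately followed by a simple SGE pointing at the DMA
	 * buffer that receives the uploaded image.
	 */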
3613 prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
3614 prequest->Function = MPI_FUNCTION_FW_UPLOAD;
3615
3616 ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
3617 ptcsge->DetailsLength = 12;
3618 ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
3619 ptcsge->ImageSize = cpu_to_le32(sz);
3620 ptcsge++;
3621
3622 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3623 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3624 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3625 ioc->SGE_size;
3626 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3627 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3628 ioc->facts.FWImageSize, request_size));
3629 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3630
3631 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3632 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3633
3634 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3635 "rc=%x \n", ioc->name, ii));
3636
	cmdStatus = -EFAULT;
	if (ii == 0) {
		/* Handshake transfer was complete and successful.
		 * Check the Reply Frame.
		 */
		int status;

		status = le16_to_cpu(preply->IOCStatus) & MPI_IOCSTATUS_MASK;
		if (status == MPI_IOCSTATUS_SUCCESS &&
		    ioc->facts.FWImageSize ==
		    le32_to_cpu(preply->ActualImageSize))
			cmdStatus = 0;
	}
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d\n",
	    ioc->name, cmdStatus));

	if (cmdStatus) {
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
		    "freeing image\n", ioc->name));
		mpt_free_fw_memory(ioc);
	}
	kfree(prequest);

	return cmdStatus;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_downloadboot - DownloadBoot code
 * @ioc: Pointer to MPT_ADAPTER structure
 * @pFwHeader: Pointer to firmware header info
 * @sleepFlag: Specifies whether the process can sleep
 *
 * FwDownloadBoot requires Programmed IO access.
 *
 * Returns 0 for success
 *	-1 FW Image size is 0
 *	-2 No valid cached_fw Pointer
 *	-3 Adapter did not come out of reset
 *	<0 for other firmware download failures.
 */
static int
mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
{
	MpiExtImageHeader_t *pExtImage;
	u32 fwSize;
	u32 diag0val;
	int count;
	u32 *ptrFw;
	u32 diagRwData;
	u32 nextImage;
	u32 load_addr;
	u32 ioc_state = 0;

	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
	    ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));

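	/*
	 * Write the WriteSequence key pattern (0xFF flush, then the five
	 * magic key values) to enable host writes to the Diagnostic
	 * register.
	 */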
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);

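	/*
	 * Hold the IOC down while the new image is loaded: keep it from
	 * booting and disable the on-chip ARM processor.
	 */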
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));

	/* wait 1 msec */
	if (sleepFlag == CAN_SLEEP)
		msleep(1);
	else
		mdelay(1);

	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);

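	/*
	 * Poll for the self-clearing RESET_ADAPTER bit; give the chip up to
	 * 30 * 100 ms = 3 seconds to come out of reset.
	 */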
	for (count = 0; count < 30; count++) {
		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
		if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
			ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n",
			    ioc->name, count));
			break;
		}
		/* wait .1 sec */
		if (sleepFlag == CAN_SLEEP)
			msleep(100);
		else
			mdelay(100);
	}

	if (count == 30) {
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! "
		    "RESET_ADAPTER did not clear, diag0val=%x\n",
		    ioc->name, diag0val));
		return -3;
	}

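	/*
	 * Re-enable diagnostic register writes after the reset by issuing
	 * the WriteSequence key pattern again.
	 */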
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);

	/* Set the DiagRwEn and Disable ARM bits */
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));

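	/*
	 * The image is transferred as 32-bit words through the DiagRwData
	 * register; round the byte count up to a whole number of dwords.
	 */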
	fwSize = (pFwHeader->ImageSize + 3) / 4;
	ptrFw = (u32 *) pFwHeader;

	/* Write the LoadStartAddress to the DiagRw Address Register
	 * using Programmed IO
	 */
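	/*
	 * errata_flag_1064 marks parts whose PCI I/O space access may have
	 * been disabled and must be re-enabled before the programmed-IO
	 * writes below.
	 */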
	if (ioc->errata_flag_1064)
		pci_enable_io_access(ioc->pcidev);

	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x\n",
	    ioc->name, pFwHeader->LoadStartAddress));

	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n",
	    ioc->name, fwSize*4, ptrFw));
	while (fwSize--)
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);

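	/*
	 * Walk the chain of extended images appended to the main firmware
	 * image.  Each extended image header supplies its own load address
	 * and size; a NextImageHeaderOffset of zero terminates the chain.
	 */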
	nextImage = pFwHeader->NextImageHeaderOffset;
	while (nextImage) {
		pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);

		load_addr = pExtImage->LoadStartAddress;

		fwSize = (pExtImage->ImageSize + 3) >> 2;
		ptrFw = (u32 *)pExtImage;

		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
		    ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);

		while (fwSize--)
			CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);

		nextImage = pExtImage->NextImageHeaderOffset;
	}

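	/*
	 * Program the IOP reset vector so that, once released from reset,
	 * the processor starts executing the newly downloaded firmware.
	 */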
	/* Write the IopResetVectorRegAddr */
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x!\n",
	    ioc->name, pFwHeader->IopResetRegAddr));
	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);

	/* Write the IopResetVectorValue */
