1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Adaptec AAC series RAID controller driver
4 * (c) Copyright 2001 Red Hat Inc.
5 *
6 * based on the old aacraid driver that is..
7 * Adaptec aacraid device driver for Linux.
8 *
9 * Copyright (c) 2000-2010 Adaptec, Inc.
10 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
11 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
12 *
13 * Module Name:
14 * commsup.c
15 *
16 * Abstract: Contain all routines that are required for FSA host/adapter
17 * communication.
18 */
19
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/crash_dump.h>
23#include <linux/types.h>
24#include <linux/sched.h>
25#include <linux/pci.h>
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/completion.h>
29#include <linux/blkdev.h>
30#include <linux/delay.h>
31#include <linux/kthread.h>
32#include <linux/interrupt.h>
33#include <linux/bcd.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38
39#include "aacraid.h"
40
41/**
42 * fib_map_alloc - allocate the fib objects
43 * @dev: Adapter to allocate for
44 *
45 * Allocate and map the shared PCI space for the FIB blocks used to
46 * talk to the Adaptec firmware.
47 */
48
49static int fib_map_alloc(struct aac_dev *dev)
50{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;
60
61 dprintk((KERN_INFO
62 "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
63 &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
64 AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
69 if (dev->hw_fib_va == NULL)
70 return -ENOMEM;
71 return 0;
72}
73
74/**
75 * aac_fib_map_free - free the fib objects
76 * @dev: Adapter to free
77 *
78 * Free the PCI mappings and the memory allocated for FIB blocks
79 * on this adapter.
80 */
81
82void aac_fib_map_free(struct aac_dev *dev)
83{
84 size_t alloc_size;
85 size_t fib_size;
86 int num_fibs;
87
88 if(!dev->hw_fib_va || !dev->max_cmd_size)
89 return;
90
91 num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
92 fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
93 alloc_size = fib_size * num_fibs + ALIGN32 - 1;
94
	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);
97
98 dev->hw_fib_va = NULL;
99 dev->hw_fib_pa = 0;
100}
101
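/**
 * aac_fib_vector_assign - assign MSI-X vectors to the fibs
 * @dev: Adapter whose fibs are assigned
 *
 * Spread the available MSI-X vectors over the fib pool. Vectors
 * 1 .. (max_msix - 1) are handed out round-robin; when only one vector
 * is available, or for the last vector_cap fibs in the pool, vector 0
 * is used.
 */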
102void aac_fib_vector_assign(struct aac_dev *dev)
103{
104 u32 i = 0;
105 u32 vector = 1;
106 struct fib *fibptr = NULL;
107
108 for (i = 0, fibptr = &dev->fibs[i];
109 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
110 i++, fibptr++) {
111 if ((dev->max_msix == 1) ||
112 (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
113 - dev->vector_cap))) {
114 fibptr->vector_no = 0;
115 } else {
116 fibptr->vector_no = vector;
117 vector++;
118 if (vector == dev->max_msix)
119 vector = 1;
120 }
121 }
122}
123
124/**
125 * aac_fib_setup - setup the fibs
126 * @dev: Adapter to set up
127 *
128 * Allocate the PCI space for the fibs, map it and then initialise the
129 * fib area, the unmapped fib data and also the free list
130 */
131
132int aac_fib_setup(struct aac_dev * dev)
133{
134 struct fib *fibptr;
135 struct hw_fib *hw_fib;
136 dma_addr_t hw_fib_pa;
137 int i;
138 u32 max_cmds;
139
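	/*
	 * If the allocation fails, halve the total number of fibs (keeping
	 * the AAC_NUM_MGT_FIB management fibs) and retry until can_queue
	 * drops to 64 - AAC_NUM_MGT_FIB.
	 */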
140 while (((i = fib_map_alloc(dev)) == -ENOMEM)
141 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
142 max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
143 dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
144 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
145 dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
146 }
147 if (i<0)
148 return -ENOMEM;
149
150 memset(dev->hw_fib_va, 0,
151 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
152 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
153
154 /* 32 byte alignment for PMC */
155 hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
156 hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
157 (hw_fib_pa - dev->hw_fib_pa));
158
159 /* add Xport header */
160 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
161 sizeof(struct aac_fib_xporthdr));
162 hw_fib_pa += sizeof(struct aac_fib_xporthdr);
163
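	/*
	 * Each fib slot in the coherent buffer is
	 * (max_cmd_size + sizeof(struct aac_fib_xporthdr)) bytes; the
	 * transport header sits in front of the hw_fib area that the host
	 * and adapter exchange.
	 */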
164 /*
165 * Initialise the fibs
166 */
167 for (i = 0, fibptr = &dev->fibs[i];
168 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
169 i++, fibptr++)
170 {
171 fibptr->flags = 0;
172 fibptr->size = sizeof(struct fib);
173 fibptr->dev = dev;
174 fibptr->hw_fib_va = hw_fib;
175 fibptr->data = (void *) fibptr->hw_fib_va->data;
176 fibptr->next = fibptr+1; /* Forward chain the fibs */
		init_completion(&fibptr->event_wait);
178 spin_lock_init(&fibptr->event_lock);
179 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
180 hw_fib->header.SenderSize =
181 cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
182 fibptr->hw_fib_pa = hw_fib_pa;
183 fibptr->hw_sgl_pa = hw_fib_pa +
184 offsetof(struct aac_hba_cmd_req, sge[2]);
185 /*
186 * one element is for the ptr to the separate sg list,
187 * second element for 32 byte alignment
188 */
189 fibptr->hw_error_pa = hw_fib_pa +
190 offsetof(struct aac_native_hba, resp.resp_bytes[0]);
191
192 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
193 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
194 hw_fib_pa = hw_fib_pa +
195 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
196 }
197
198 /*
199 *Assign vector numbers to fibs
200 */
201 aac_fib_vector_assign(dev);
202
203 /*
204 * Add the fib chain to the free list
205 */
206 dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
207 /*
208 * Set 8 fibs aside for management tools
209 */
210 dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
211 return 0;
212}
213
214/**
215 * aac_fib_alloc_tag-allocate a fib using tags
216 * @dev: Adapter to allocate the fib for
217 * @scmd: SCSI command
218 *
219 * Allocate a fib from the adapter fib pool using tags
220 * from the blk layer.
221 */
222
223struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
224{
225 struct fib *fibptr;
226 u32 blk_tag;
227 int i;
228
	blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	i = blk_mq_unique_tag_to_tag(blk_tag);
231 fibptr = &dev->fibs[i];
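	/*
	 * The blk-mq tag uniquely identifies this slot in dev->fibs, so
	 * unlike aac_fib_alloc() no fib_lock is needed here.
	 */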
232 /*
233 * Null out fields that depend on being zero at the start of
234 * each I/O
235 */
236 fibptr->hw_fib_va->header.XferState = 0;
237 fibptr->type = FSAFS_NTC_FIB_CONTEXT;
238 fibptr->callback_data = NULL;
239 fibptr->callback = NULL;
240 fibptr->flags = 0;
241
242 return fibptr;
243}
244
245/**
246 * aac_fib_alloc - allocate a fib
247 * @dev: Adapter to allocate the fib for
248 *
249 * Allocate a fib from the adapter fib pool. If the pool is empty we
250 * return NULL.
251 */
252
253struct fib *aac_fib_alloc(struct aac_dev *dev)
254{
255 struct fib * fibptr;
256 unsigned long flags;
257 spin_lock_irqsave(&dev->fib_lock, flags);
258 fibptr = dev->free_fib;
259 if(!fibptr){
		spin_unlock_irqrestore(&dev->fib_lock, flags);
261 return fibptr;
262 }
263 dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
265 /*
266 * Set the proper node type code and node byte size
267 */
268 fibptr->type = FSAFS_NTC_FIB_CONTEXT;
269 fibptr->size = sizeof(struct fib);
270 /*
271 * Null out fields that depend on being zero at the start of
272 * each I/O
273 */
274 fibptr->hw_fib_va->header.XferState = 0;
275 fibptr->flags = 0;
276 fibptr->callback = NULL;
277 fibptr->callback_data = NULL;
278
279 return fibptr;
280}
281
282/**
283 * aac_fib_free - free a fib
284 * @fibptr: fib to free up
285 *
286 * Frees up a fib and places it on the appropriate queue
287 */
288
289void aac_fib_free(struct fib *fibptr)
290{
291 unsigned long flags;
292
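	/*
	 * done == 2 marks a request whose waiter gave up (aborted or timed
	 * out); such a fib is deliberately not returned to the free list.
	 */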
293 if (fibptr->done == 2)
294 return;
295
296 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
297 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
298 aac_config.fib_timeouts++;
299 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
300 fibptr->hw_fib_va->header.XferState != 0) {
301 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
302 (void*)fibptr,
303 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
304 }
305 fibptr->next = fibptr->dev->free_fib;
306 fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
308}
309
310/**
311 * aac_fib_init - initialise a fib
312 * @fibptr: The fib to initialize
313 *
314 * Set up the generic fib fields ready for use
315 */
316
317void aac_fib_init(struct fib *fibptr)
318{
319 struct hw_fib *hw_fib = fibptr->hw_fib_va;
320
321 memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
322 hw_fib->header.StructType = FIB_MAGIC;
323 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
324 hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
325 hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
326 hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
327}
328
329/**
330 * fib_dealloc - deallocate a fib
331 * @fibptr: fib to deallocate
332 *
333 * Will deallocate and return to the free pool the FIB pointed to by the
334 * caller.
335 */
336
337static void fib_dealloc(struct fib * fibptr)
338{
339 struct hw_fib *hw_fib = fibptr->hw_fib_va;
340 hw_fib->header.XferState = 0;
341}
342
/*
 * Communication primitives define and support the queuing method we use to
 * support host to adapter communication. All queue accesses happen through
 * these routines, which are the only routines with knowledge of how these
 * queues are implemented.
 */
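
/*
 * Each queue is a circular ring of aac_entry structures; the producer and
 * consumer indices live in memory shared with the adapter
 * (q->headers.producer / q->headers.consumer) and are kept little-endian.
 */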
349
350/**
351 * aac_get_entry - get a queue entry
352 * @dev: Adapter
353 * @qid: Queue Number
354 * @entry: Entry return
355 * @index: Index return
356 * @nonotify: notification control
357 *
 * With a priority the routine returns a queue entry if the queue has free
 * entries. If the queue is full (no free entries) then no entry is returned
 * and the function returns 0, otherwise 1 is returned.
361 */
362
363static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
364{
365 struct aac_queue * q;
366 unsigned long idx;
367
368 /*
369 * All of the queues wrap when they reach the end, so we check
370 * to see if they have reached the end and if they have we just
371 * set the index back to zero. This is a wrap. You could or off
372 * the high bits in all updates but this is a bit faster I think.
373 */
374
375 q = &dev->queues->queue[qid];
376
377 idx = *index = le32_to_cpu(*(q->headers.producer));
378 /* Interrupt Moderation, only interrupt for first two entries */
379 if (idx != le32_to_cpu(*(q->headers.consumer))) {
380 if (--idx == 0) {
381 if (qid == AdapNormCmdQueue)
382 idx = ADAP_NORM_CMD_ENTRIES;
383 else
384 idx = ADAP_NORM_RESP_ENTRIES;
385 }
386 if (idx != le32_to_cpu(*(q->headers.consumer)))
387 *nonotify = 1;
388 }
389
390 if (qid == AdapNormCmdQueue) {
391 if (*index >= ADAP_NORM_CMD_ENTRIES)
392 *index = 0; /* Wrap to front of the Producer Queue. */
393 } else {
394 if (*index >= ADAP_NORM_RESP_ENTRIES)
395 *index = 0; /* Wrap to front of the Producer Queue. */
396 }
397
398 /* Queue is full */
399 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
400 printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
401 qid, atomic_read(&q->numpending));
402 return 0;
403 } else {
404 *entry = q->base + *index;
405 return 1;
406 }
407}
408
409/**
410 * aac_queue_get - get the next free QE
411 * @dev: Adapter
412 * @index: Returned index
413 * @qid: Queue number
414 * @hw_fib: Fib to associate with the queue entry
415 * @wait: Wait if queue full
416 * @fibptr: Driver fib object to go with fib
417 * @nonotify: Don't notify the adapter
418 *
 * Gets the next free QE off the requested priority adapter command
420 * queue and associates the Fib with the QE. The QE represented by
421 * index is ready to insert on the queue when this routine returns
422 * success.
423 */
424
425int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
426{
427 struct aac_entry * entry = NULL;
428 int map = 0;
429
430 if (qid == AdapNormCmdQueue) {
431 /* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
433 printk(KERN_ERR "GetEntries failed\n");
434 }
435 /*
436 * Setup queue entry with a command, status and fib mapped
437 */
438 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
439 map = 1;
440 } else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
442 /* if no entries wait for some if caller wants to */
443 }
444 /*
445 * Setup queue entry with command, status and fib mapped
446 */
447 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
448 entry->addr = hw_fib->header.SenderFibAddress;
449 /* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
451 map = 0;
452 }
453 /*
	 * If MapFib is true then we need to map the Fib and put pointers
455 * in the queue entry.
456 */
457 if (map)
458 entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
459 return 0;
460}
461
462/*
463 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
466 * sends and receives FIBs. This level has no knowledge of how these FIBs
467 * get passed back and forth.
468 */
469
470/**
471 * aac_fib_send - send a fib to the adapter
472 * @command: Command to send
473 * @fibptr: The fib
474 * @size: Size of fib data area
475 * @priority: Priority of Fib
476 * @wait: Async/sync select
477 * @reply: True if a reply is wanted
478 * @callback: Called with reply
479 * @callback_data: Passed to callback
480 *
481 * Sends the requested FIB to the adapter and optionally will wait for a
 * response FIB. If the caller does not wish to wait for a response then
483 * an event to wait on must be supplied. This event will be set when a
484 * response FIB is received from the adapter.
485 */
486
487int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
488 int priority, int wait, int reply, fib_callback callback,
489 void *callback_data)
490{
491 struct aac_dev * dev = fibptr->dev;
492 struct hw_fib * hw_fib = fibptr->hw_fib_va;
493 unsigned long flags = 0;
494 unsigned long mflags = 0;
495 unsigned long sflags = 0;
496
497 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
498 return -EBUSY;
499
500 if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
501 return -EINVAL;
502
503 /*
504 * There are 5 cases with the wait and response requested flags.
505 * The only invalid cases are if the caller requests to wait and
506 * does not request a response and if the caller does not want a
507 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
509 * routine when the response comes back from the adapter. No
510 * further processing will be done besides deleting the Fib. We
511 * will have a debug mode where the adapter can notify the host
512 * it had a problem and the host can log that fact.
513 */
514 fibptr->flags = 0;
515 if (wait && !reply) {
516 return -EINVAL;
517 } else if (!wait && reply) {
518 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
519 FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
520 } else if (!wait && !reply) {
521 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
522 FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
523 } else if (wait && reply) {
524 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
525 FIB_COUNTER_INCREMENT(aac_config.NormalSent);
526 }
527 /*
528 * Map the fib into 32bits by using the fib number
529 */
530
531 hw_fib->header.SenderFibAddress =
532 cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
533
534 /* use the same shifted value for handle to be compatible
535 * with the new native hba command handle
536 */
537 hw_fib->header.Handle =
538 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
539
540 /*
541 * Set FIB state to indicate where it came from and if we want a
542 * response from the adapter. Also load the command from the
543 * caller.
544 *
545 * Map the hw fib pointer as a 32bit value
546 */
547 hw_fib->header.Command = cpu_to_le16(command);
548 hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
549 /*
550 * Set the size of the Fib we want to send to the adapter
551 */
552 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
553 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
554 return -EMSGSIZE;
555 }
556 /*
	 * Get a queue entry, connect the FIB to it and send a notify to
	 * the adapter that a command is ready.
559 */
560 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
561
562 /*
563 * Fill in the Callback and CallbackContext if we are not
564 * going to wait.
565 */
566 if (!wait) {
567 fibptr->callback = callback;
568 fibptr->callback_data = callback_data;
569 fibptr->flags = FIB_CONTEXT_FLAG;
570 }
571
572 fibptr->done = 0;
573
574 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
575
576 dprintk((KERN_DEBUG "Fib contents:.\n"));
577 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
578 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
579 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
580 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
581 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
582 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
583
584 if (!dev->queues)
585 return -EBUSY;
586
587 if (wait) {
588
589 spin_lock_irqsave(&dev->manage_lock, mflags);
590 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
591 printk(KERN_INFO "No management Fibs Available:%d\n",
592 dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
598 spin_lock_irqsave(&fibptr->event_lock, flags);
599 }
600
601 if (dev->sync_mode) {
602 if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
618 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
619 return -EFAULT;
620 }
621 return 0;
622 }
623 return -EINPROGRESS;
624 }
625
626 if (aac_adapter_deliver(fibptr) != 0) {
627 printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
628 if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
633 }
634 return -EBUSY;
635 }
636
637
638 /*
639 * If the caller wanted us to wait for response wait now.
640 */
641
642 if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
645 if (wait < 0) {
646 /*
647 * *VERY* Dangerous to time out a command, the
648 * assumption is made that we have no hope of
649 * functioning because an interrupt routing or other
650 * hardware failure has occurred.
651 */
652 unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (!try_wait_for_completion(&fibptr->event_wait)) {
654 int blink;
655 if (time_is_before_eq_jiffies(timeout)) {
656 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
658 if (wait == -1) {
659 printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
660 "Usually a result of a PCI interrupt routing problem;\n"
661 "update mother board BIOS or consider utilizing one of\n"
662 "the SAFE mode kernel options (acpi, apic etc)\n");
663 }
664 return -ETIMEDOUT;
665 }
666
667 if (unlikely(aac_pci_offline(dev)))
668 return -EFAULT;
669
670 if ((blink = aac_adapter_check_health(dev)) > 0) {
671 if (wait == -1) {
672 printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
673 "Usually a result of a serious unrecoverable hardware problem\n",
674 blink);
675 }
676 return -EFAULT;
677 }
678 /*
679 * Allow other processes / CPUS to use core
680 */
681 schedule();
682 }
		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
684 /* Do nothing ... satisfy
685 * wait_for_completion_interruptible must_check */
686 }
687
688 spin_lock_irqsave(&fibptr->event_lock, flags);
689 if (fibptr->done == 0) {
690 fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
695 BUG_ON(fibptr->done == 0);
696
697 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
698 return -ETIMEDOUT;
699 return 0;
700 }
701 /*
	 * If the user does not want a response then return success, otherwise
703 * return pending
704 */
705 if (reply)
706 return -EINPROGRESS;
707 else
708 return 0;
709}
710
711int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
712 void *callback_data)
713{
714 struct aac_dev *dev = fibptr->dev;
715 int wait;
716 unsigned long flags = 0;
717 unsigned long mflags = 0;
718 struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
719 fibptr->hw_fib_va;
720
721 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
722 if (callback) {
723 wait = 0;
724 fibptr->callback = callback;
725 fibptr->callback_data = callback_data;
726 } else
727 wait = 1;
728
729
730 hbacmd->iu_type = command;
731
732 if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
733 /* bit1 of request_id must be 0 */
734 hbacmd->request_id =
735 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
736 fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
737 } else
738 return -EINVAL;
739
740
741 if (wait) {
742 spin_lock_irqsave(&dev->manage_lock, mflags);
743 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
749 spin_lock_irqsave(&fibptr->event_lock, flags);
750 }
751
752 if (aac_adapter_deliver(fibptr) != 0) {
753 if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
758 }
759 return -EBUSY;
760 }
761 FIB_COUNTER_INCREMENT(aac_config.NativeSent);
762
763 if (wait) {
764
		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(aac_pci_offline(dev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (wait_for_completion_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
782 WARN_ON(fibptr->done == 0);
783
784 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
785 return -ETIMEDOUT;
786
787 return 0;
788 }
789
790 return -EINPROGRESS;
791}
792
793/**
794 * aac_consumer_get - get the top of the queue
795 * @dev: Adapter
796 * @q: Queue
797 * @entry: Return entry
798 *
799 * Will return a pointer to the entry on the top of the queue requested that
800 * we are a consumer of, and return the address of the queue entry. It does
801 * not change the state of the queue.
802 */
803
804int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
805{
806 u32 index;
807 int status;
808 if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
809 status = 0;
810 } else {
811 /*
812 * The consumer index must be wrapped if we have reached
813 * the end of the queue, else we just use the entry
814 * pointed to by the header index
815 */
816 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
817 index = 0;
818 else
819 index = le32_to_cpu(*q->headers.consumer);
820 *entry = q->base + index;
821 status = 1;
822 }
823 return(status);
824}
825
826/**
827 * aac_consumer_free - free consumer entry
828 * @dev: Adapter
829 * @q: Queue
830 * @qid: Queue ident
831 *
832 * Frees up the current top of the queue we are a consumer of. If the
833 * queue was full notify the producer that the queue is no longer full.
834 */
835
836void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
837{
838 int wasfull = 0;
839 u32 notify;
840
841 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
842 wasfull = 1;
843
844 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
845 *q->headers.consumer = cpu_to_le32(1);
846 else
		le32_add_cpu(q->headers.consumer, 1);
848
849 if (wasfull) {
850 switch (qid) {
851
852 case HostNormCmdQueue:
853 notify = HostNormCmdNotFull;
854 break;
855 case HostNormRespQueue:
856 notify = HostNormRespNotFull;
857 break;
858 default:
859 BUG();
860 return;
861 }
862 aac_adapter_notify(dev, notify);
863 }
864}
865
866/**
867 * aac_fib_adapter_complete - complete adapter issued fib
868 * @fibptr: fib to complete
869 * @size: size of fib
870 *
871 * Will do all necessary work to complete a FIB that was sent from
872 * the adapter.
873 */
874
875int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
876{
877 struct hw_fib * hw_fib = fibptr->hw_fib_va;
878 struct aac_dev * dev = fibptr->dev;
879 struct aac_queue * q;
880 unsigned long nointr = 0;
881 unsigned long qflags;
882
883 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
884 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
885 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
887 return 0;
888 }
889
890 if (hw_fib->header.XferState == 0) {
891 if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
893 return 0;
894 }
895 /*
896 * If we plan to do anything check the structure type first.
897 */
898 if (hw_fib->header.StructType != FIB_MAGIC &&
899 hw_fib->header.StructType != FIB_MAGIC2 &&
900 hw_fib->header.StructType != FIB_MAGIC2_64) {
901 if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
903 return -EINVAL;
904 }
905 /*
906 * This block handles the case where the adapter had sent us a
907 * command and we have finished processing the command. We
908 * call completeFib when we are done processing the command
909 * and want to send a response back to the adapter. This will
910 * send the completed cdb to the adapter.
911 */
912 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
913 if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
915 } else {
916 u32 index;
917 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
918 if (size) {
919 size += sizeof(struct aac_fibhdr);
920 if (size > le16_to_cpu(hw_fib->header.SenderSize))
921 return -EMSGSIZE;
922 hw_fib->header.Size = cpu_to_le16(size);
923 }
924 q = &dev->queues->queue[AdapNormRespQueue];
925 spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
929 if (!(nointr & (int)aac_config.irq_mod))
930 aac_adapter_notify(dev, AdapNormRespQueue);
931 }
932 } else {
933 printk(KERN_WARNING "aac_fib_adapter_complete: "
934 "Unknown xferstate detected.\n");
935 BUG();
936 }
937 return 0;
938}
939
940/**
941 * aac_fib_complete - fib completion handler
942 * @fibptr: FIB to complete
943 *
944 * Will do all necessary work to complete a FIB.
945 */
946
947int aac_fib_complete(struct fib *fibptr)
948{
949 struct hw_fib * hw_fib = fibptr->hw_fib_va;
950
951 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
952 fib_dealloc(fibptr);
953 return 0;
954 }
955
956 /*
957 * Check for a fib which has already been completed or with a
958 * status wait timeout
959 */
960
961 if (hw_fib->header.XferState == 0 || fibptr->done == 2)
962 return 0;
963 /*
964 * If we plan to do anything check the structure type first.
965 */
966
967 if (hw_fib->header.StructType != FIB_MAGIC &&
968 hw_fib->header.StructType != FIB_MAGIC2 &&
969 hw_fib->header.StructType != FIB_MAGIC2_64)
970 return -EINVAL;
971 /*
	 * This block completes a cdb which originated on the host and we
973 * just need to deallocate the cdb or reinit it. At this point the
974 * command is complete that we had sent to the adapter and this
975 * cdb could be reused.
976 */
977
978 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
979 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
980 {
981 fib_dealloc(fibptr);
982 }
983 else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
984 {
985 /*
986 * This handles the case when the host has aborted the I/O
987 * to the adapter because the adapter is not responding
988 */
989 fib_dealloc(fibptr);
990 } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
991 fib_dealloc(fibptr);
992 } else {
993 BUG();
994 }
995 return 0;
996}
997
998/**
999 * aac_printf - handle printf from firmware
1000 * @dev: Adapter
1001 * @val: Message info
1002 *
1003 * Print a message passed to us by the controller firmware on the
1004 * Adaptec board
1005 */
1006
1007void aac_printf(struct aac_dev *dev, u32 val)
1008{
1009 char *cp = dev->printfbuf;
1010 if (dev->printf_enabled)
1011 {
1012 int length = val & 0xffff;
1013 int level = (val >> 16) & 0xffff;
1014
1015 /*
1016 * The size of the printfbuf is set in port.c
1017 * There is no variable or define for it
1018 */
1019 if (length > 255)
1020 length = 255;
1021 if (cp[length] != 0)
1022 cp[length] = 0;
1023 if (level == LOG_AAC_HIGH_ERROR)
1024 printk(KERN_WARNING "%s:%s", dev->name, cp);
1025 else
1026 printk(KERN_INFO "%s:%s", dev->name, cp);
1027 }
1028 memset(cp, 0, 256);
1029}
1030
1031static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
1032{
1033 return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
1034}
1035
1036
1037static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
1038{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
				 aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
1049 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
1050 aac_aif_data(aifcmd, 2));
1051 else
1052 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
1053 break;
1054 }
1055}
1056
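/* How long (in jiffies) a config_waiting_on hint stays valid before it is ignored */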
1057#define AIF_SNIFF_TIMEOUT (500*HZ)
1058/**
1059 * aac_handle_aif - Handle a message from the firmware
1060 * @dev: Which adapter this fib is from
1061 * @fibptr: Pointer to fibptr from adapter
1062 *
1063 * This routine handles a driver notify fib from the adapter and
1064 * dispatches it to the appropriate routine for handling.
1065 */
1066static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1067{
1068 struct hw_fib * hw_fib = fibptr->hw_fib_va;
1069 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
1070 u32 channel, id, lun, container;
1071 struct scsi_device *device;
1072 enum {
1073 NOTHING,
1074 DELETE,
1075 ADD,
1076 CHANGE
1077 } device_config_needed = NOTHING;
1078
1079 /* Sniff for container changes */
1080
1081 if (!dev || !dev->fsa_dev)
1082 return;
1083 container = channel = id = lun = (u32)-1;
1084
1085 /*
1086 * We have set this up to try and minimize the number of
1087 * re-configures that take place. As a result of this when
1088 * certain AIF's come in we will set a flag waiting for another
1089 * type of AIF before setting the re-config flag.
1090 */
1091 switch (le32_to_cpu(aifcmd->command)) {
1092 case AifCmdDriverNotify:
1093 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1094 case AifRawDeviceRemove:
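			/*
			 * The event payload packs the device address into one
			 * 32-bit word: bits 31-28 must be zero, 27-24 hold the
			 * physical channel, 23-16 the LUN and 15-0 the target id.
			 */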
1095 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1096 if ((container >> 28)) {
1097 container = (u32)-1;
1098 break;
1099 }
1100 channel = (container >> 24) & 0xF;
1101 if (channel >= dev->maximum_num_channels) {
1102 container = (u32)-1;
1103 break;
1104 }
1105 id = container & 0xFFFF;
1106 if (id >= dev->maximum_num_physicals) {
1107 container = (u32)-1;
1108 break;
1109 }
1110 lun = (container >> 16) & 0xFF;
1111 container = (u32)-1;
1112 channel = aac_phys_to_logical(channel);
1113 device_config_needed = DELETE;
1114 break;
1115
1116 /*
1117 * Morph or Expand complete
1118 */
1119 case AifDenMorphComplete:
1120 case AifDenVolumeExtendComplete:
1121 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1122 if (container >= dev->maximum_num_containers)
1123 break;
1124
1125 /*
1126 * Find the scsi_device associated with the SCSI
1127 * address. Make sure we have the right array, and if
1128 * so set the flag to initiate a new re-config once we
1129 * see an AifEnConfigChange AIF come through.
1130 */
1131
1132 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
1133 device = scsi_device_lookup(dev->scsi_host_ptr,
1134 CONTAINER_TO_CHANNEL(container),
1135 CONTAINER_TO_ID(container),
1136 CONTAINER_TO_LUN(container));
1137 if (device) {
1138 dev->fsa_dev[container].config_needed = CHANGE;
1139 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
1140 dev->fsa_dev[container].config_waiting_stamp = jiffies;
1141 scsi_device_put(device);
1142 }
1143 }
1144 }
1145
1146 /*
1147 * If we are waiting on something and this happens to be
1148 * that thing then set the re-configure flag.
1149 */
1150 if (container != (u32)-1) {
1151 if (container >= dev->maximum_num_containers)
1152 break;
1153 if ((dev->fsa_dev[container].config_waiting_on ==
1154 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1155 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1156 dev->fsa_dev[container].config_waiting_on = 0;
1157 } else for (container = 0;
1158 container < dev->maximum_num_containers; ++container) {
1159 if ((dev->fsa_dev[container].config_waiting_on ==
1160 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1161 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1162 dev->fsa_dev[container].config_waiting_on = 0;
1163 }
1164 break;
1165
1166 case AifCmdEventNotify:
1167 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1168 case AifEnBatteryEvent:
1169 dev->cache_protected =
1170 (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
1171 break;
1172 /*
1173 * Add an Array.
1174 */
1175 case AifEnAddContainer:
1176 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1177 if (container >= dev->maximum_num_containers)
1178 break;
1179 dev->fsa_dev[container].config_needed = ADD;
1180 dev->fsa_dev[container].config_waiting_on =
1181 AifEnConfigChange;
1182 dev->fsa_dev[container].config_waiting_stamp = jiffies;
1183 break;
1184
1185 /*
1186 * Delete an Array.
1187 */
1188 case AifEnDeleteContainer:
1189 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1190 if (container >= dev->maximum_num_containers)
1191 break;
1192 dev->fsa_dev[container].config_needed = DELETE;
1193 dev->fsa_dev[container].config_waiting_on =
1194 AifEnConfigChange;
1195 dev->fsa_dev[container].config_waiting_stamp = jiffies;
1196 break;
1197
1198 /*
1199 * Container change detected. If we currently are not
1200 * waiting on something else, setup to wait on a Config Change.
1201 */
1202 case AifEnContainerChange:
1203 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1204 if (container >= dev->maximum_num_containers)
1205 break;
1206 if (dev->fsa_dev[container].config_waiting_on &&
1207 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1208 break;
1209 dev->fsa_dev[container].config_needed = CHANGE;
1210 dev->fsa_dev[container].config_waiting_on =
1211 AifEnConfigChange;
1212 dev->fsa_dev[container].config_waiting_stamp = jiffies;
1213 break;
1214
1215 case AifEnConfigChange:
1216 break;
1217
1218 case AifEnAddJBOD:
1219 case AifEnDeleteJBOD:
1220 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1221 if ((container >> 28)) {
1222 container = (u32)-1;
1223 break;
1224 }
1225 channel = (container >> 24) & 0xF;
1226 if (channel >= dev->maximum_num_channels) {
1227 container = (u32)-1;
1228 break;
1229 }
1230 id = container & 0xFFFF;
1231 if (id >= dev->maximum_num_physicals) {
1232 container = (u32)-1;
1233 break;
1234 }
1235 lun = (container >> 16) & 0xFF;
1236 container = (u32)-1;
1237 channel = aac_phys_to_logical(channel);
1238 device_config_needed =
1239 (((__le32 *)aifcmd->data)[0] ==
1240 cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
1241 if (device_config_needed == ADD) {
1242 device = scsi_device_lookup(dev->scsi_host_ptr,
1243 channel,
1244 id,
1245 lun);
1246 if (device) {
1247 scsi_remove_device(device);
1248 scsi_device_put(device);
1249 }
1250 }
1251 break;
1252
1253 case AifEnEnclosureManagement:
1254 /*
1255 * If in JBOD mode, automatic exposure of new
1256 * physical target to be suppressed until configured.
1257 */
1258 if (dev->jbod)
1259 break;
1260 switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
1261 case EM_DRIVE_INSERTION:
1262 case EM_DRIVE_REMOVAL:
1263 case EM_SES_DRIVE_INSERTION:
1264 case EM_SES_DRIVE_REMOVAL:
1265 container = le32_to_cpu(
1266 ((__le32 *)aifcmd->data)[2]);
1267 if ((container >> 28)) {
1268 container = (u32)-1;
1269 break;
1270 }
1271 channel = (container >> 24) & 0xF;
1272 if (channel >= dev->maximum_num_channels) {
1273 container = (u32)-1;
1274 break;
1275 }
1276 id = container & 0xFFFF;
1277 lun = (container >> 16) & 0xFF;
1278 container = (u32)-1;
1279 if (id >= dev->maximum_num_physicals) {
1280 /* legacy dev_t ? */
1281 if ((0x2000 <= id) || lun || channel ||
1282 ((channel = (id >> 7) & 0x3F) >=
1283 dev->maximum_num_channels))
1284 break;
1285 lun = (id >> 4) & 7;
1286 id &= 0xF;
1287 }
1288 channel = aac_phys_to_logical(channel);
1289 device_config_needed =
1290 ((((__le32 *)aifcmd->data)[3]
1291 == cpu_to_le32(EM_DRIVE_INSERTION)) ||
1292 (((__le32 *)aifcmd->data)[3]
1293 == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
1294 ADD : DELETE;
1295 break;
1296 }
1297 break;
1298 case AifBuManagerEvent:
1299 aac_handle_aif_bu(dev, aifcmd);
1300 break;
1301 }
1302
1303 /*
1304 * If we are waiting on something and this happens to be
1305 * that thing then set the re-configure flag.
1306 */
1307 if (container != (u32)-1) {
1308 if (container >= dev->maximum_num_containers)
1309 break;
1310 if ((dev->fsa_dev[container].config_waiting_on ==
1311 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1312 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1313 dev->fsa_dev[container].config_waiting_on = 0;
1314 } else for (container = 0;
1315 container < dev->maximum_num_containers; ++container) {
1316 if ((dev->fsa_dev[container].config_waiting_on ==
1317 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1318 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1319 dev->fsa_dev[container].config_waiting_on = 0;
1320 }
1321 break;
1322
1323 case AifCmdJobProgress:
1324 /*
1325 * These are job progress AIF's. When a Clear is being
1326 * done on a container it is initially created then hidden from
1327 * the OS. When the clear completes we don't get a config
1328 * change so we monitor the job status complete on a clear then
1329 * wait for a container change.
1330 */
1331
1332 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1333 (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1334 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1335 for (container = 0;
1336 container < dev->maximum_num_containers;
1337 ++container) {
1338 /*
1339 * Stomp on all config sequencing for all
1340 * containers?
1341 */
1342 dev->fsa_dev[container].config_waiting_on =
1343 AifEnContainerChange;
1344 dev->fsa_dev[container].config_needed = ADD;
1345 dev->fsa_dev[container].config_waiting_stamp =
1346 jiffies;
1347 }
1348 }
1349 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1350 ((__le32 *)aifcmd->data)[6] == 0 &&
1351 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1352 for (container = 0;
1353 container < dev->maximum_num_containers;
1354 ++container) {
1355 /*
1356 * Stomp on all config sequencing for all
1357 * containers?
1358 */
1359 dev->fsa_dev[container].config_waiting_on =
1360 AifEnContainerChange;
1361 dev->fsa_dev[container].config_needed = DELETE;
1362 dev->fsa_dev[container].config_waiting_stamp =
1363 jiffies;
1364 }
1365 }
1366 break;
1367 }
1368
1369 container = 0;
1370retry_next:
1371 if (device_config_needed == NOTHING) {
1372 for (; container < dev->maximum_num_containers; ++container) {
1373 if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1374 (dev->fsa_dev[container].config_needed != NOTHING) &&
1375 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1376 device_config_needed =
1377 dev->fsa_dev[container].config_needed;
1378 dev->fsa_dev[container].config_needed = NOTHING;
1379 channel = CONTAINER_TO_CHANNEL(container);
1380 id = CONTAINER_TO_ID(container);
1381 lun = CONTAINER_TO_LUN(container);
1382 break;
1383 }
1384 }
1385 }
1386 if (device_config_needed == NOTHING)
1387 return;
1388
1389 /*
1390 * If we decided that a re-configuration needs to be done,
1391 * schedule it here on the way out the door, please close the door
1392 * behind you.
1393 */
1394
1395 /*
1396 * Find the scsi_device associated with the SCSI address,
1397 * and mark it as changed, invalidating the cache. This deals
1398 * with changes to existing device IDs.
1399 */
1400
1401 if (!dev || !dev->scsi_host_ptr)
1402 return;
1403 /*
1404 * force reload of disk info via aac_probe_container
1405 */
1406 if ((channel == CONTAINER_CHANNEL) &&
1407 (device_config_needed != NOTHING)) {
1408 if (dev->fsa_dev[container].valid == 1)
1409 dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
1411 }
1412 device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1413 if (device) {
1414 switch (device_config_needed) {
1415 case DELETE:
1416#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1417 scsi_remove_device(device);
1418#else
1419 if (scsi_device_online(device)) {
1420 scsi_device_set_state(device, SDEV_OFFLINE);
1421 sdev_printk(KERN_INFO, device,
1422 "Device offlined - %s\n",
1423 (channel == CONTAINER_CHANNEL) ?
1424 "array deleted" :
1425 "enclosure services event");
1426 }
1427#endif
1428 break;
1429 case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
1437 }
1438 fallthrough;
1439 case CHANGE:
1440 if ((channel == CONTAINER_CHANNEL)
1441 && (!dev->fsa_dev[container].valid)) {
1442#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1443 scsi_remove_device(device);
1444#else
1445 if (!scsi_device_online(device))
1446 break;
1447 scsi_device_set_state(device, SDEV_OFFLINE);
1448 sdev_printk(KERN_INFO, device,
1449 "Device offlined - %s\n",
1450 "array failed");
1451#endif
1452 break;
1453 }
			scsi_rescan_device(device);
1455 break;
1456
1457 default:
1458 break;
1459 }
1460 scsi_device_put(device);
1461 device_config_needed = NOTHING;
1462 }
1463 if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1465 if (channel == CONTAINER_CHANNEL) {
1466 container++;
1467 device_config_needed = NOTHING;
1468 goto retry_next;
1469 }
1470}
1471
1472static void aac_schedule_bus_scan(struct aac_dev *aac)
1473{
1474 if (aac->sa_firmware)
		aac_schedule_safw_scan_worker(aac);
	else
		aac_schedule_src_reinit_aif_worker(aac);
1478}
1479
1480static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1481{
1482 int index, quirks;
1483 int retval;
1484 struct Scsi_Host *host = aac->scsi_host_ptr;
1485 int jafo = 0;
1486 int bled;
1487 u64 dmamask;
1488 int num_of_fibs = 0;
1489
1490 /*
1491 * Assumptions:
1492 * - host is locked, unless called by the aacraid thread.
1493 * (a matter of convenience, due to legacy issues surrounding
1494 * eh_host_adapter_reset).
1495 * - in_reset is asserted, so no new i/o is getting to the
1496 * card.
1497 * - The card is dead, or will be very shortly ;-/ so no new
1498 * commands are completing in the interrupt service.
1499 */
1500 aac_adapter_disable_int(aac);
1501 if (aac->thread && aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
1504 aac->thread = NULL;
1505 jafo = 1;
1506 }
1507
1508 /*
1509 * If a positive health, means in a known DEAD PANIC
1510 * state and the adapter could be reset to `try again'.
1511 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
1513 retval = aac_adapter_restart(aac, bled, reset_type);
1514
1515 if (retval)
1516 goto out;
1517
1518 /*
1519 * Loop through the fibs, close the synchronous FIBS
1520 */
1521 retval = 1;
1522 num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
1523 for (index = 0; index < num_of_fibs; index++) {
1524
1525 struct fib *fib = &aac->fibs[index];
1526 __le32 XferState = fib->hw_fib_va->header.XferState;
1527 bool is_response_expected = false;
1528
1529 if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1530 (XferState & cpu_to_le32(ResponseExpected)))
1531 is_response_expected = true;
1532
1533 if (is_response_expected
1534 || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
1535 unsigned long flagv;
1536 spin_lock_irqsave(&fib->event_lock, flagv);
1537 complete(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
1539 schedule();
1540 retval = 0;
1541 }
1542 }
1543 /* Give some extra time for ioctls to complete. */
1544 if (retval == 0)
		ssleep(2);
1546 index = aac->cardtype;
1547
1548 /*
1549 * Re-initialize the adapter, first free resources, then carefully
1550 * apply the initialization sequence to come back again. Only risk
1551 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_free_irq(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
1559 aac_adapter_ioremap(aac, 0);
1560 aac->comm_addr = NULL;
1561 aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	kfree(aac->fsa_dev);
1565 aac->fsa_dev = NULL;
1566
1567 dmamask = DMA_BIT_MASK(32);
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT)
		retval = dma_set_mask(&aac->pdev->dev, dmamask);
	else if (!(quirks & AAC_QUIRK_SRC))
		retval = dma_set_mask(&aac->pdev->dev, dmamask);
	else
		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);

	if (quirks & AAC_QUIRK_31BIT && !retval) {
		dmamask = DMA_BIT_MASK(31);
		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
1579 }
1580
1581 if (retval)
1582 goto out;
1583
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1585 goto out;
1586
1587 if (jafo) {
1588 aac->thread = kthread_run(aac_command_thread, aac, "%s",
1589 aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			aac->thread = NULL;
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
1597 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1598 host->sg_tablesize = 34;
1599 host->max_sectors = (host->sg_tablesize * 8) + 112;
1600 }
1601 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1602 host->sg_tablesize = 17;
1603 host->max_sectors = (host->sg_tablesize * 8) + 112;
1604 }
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
1607 /*
1608 * This is where the assumption that the Adapter is quiesced
1609 * is important.
1610 */
	scsi_host_complete_all_commands(host, DID_RESET);
1612
1613 retval = 0;
1614out:
1615 aac->in_reset = 0;
1616
1617 /*
1618 * Issue bus rescan to catch any configuration that might have
1619 * occurred
1620 */
1621 if (!retval && !is_kdump_kernel()) {
1622 dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
1623 aac_schedule_bus_scan(aac);
1624 }
1625
1626 if (jafo) {
		spin_lock_irq(host->host_lock);
1628 }
1629 return retval;
1630}
1631
1632int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1633{
1634 unsigned long flagv = 0;
1635 int retval, unblock_retval;
1636 struct Scsi_Host *host = aac->scsi_host_ptr;
1637 int bled;
1638
1639 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1640 return -EBUSY;
1641
1642 if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1644 return -EBUSY;
1645 }
1646 aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1648
1649 /*
1650 * Wait for all commands to complete to this specific
1651 * target (block maximum 60 seconds). Although not necessary,
1652 * it does make us a good storage citizen.
1653 */
	scsi_host_block(host);
1655
1656 /* Quiesce build, flush cache, write through mode */
1657 if (forced < 2)
		aac_send_shutdown(aac);
1659 spin_lock_irqsave(host->host_lock, flagv);
1660 bled = forced ? forced :
1661 (aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);
1664
	unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
1666 if (!retval)
1667 retval = unblock_retval;
1668 if ((forced < 2) && (retval == -ENODEV)) {
1669 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib * fibctx = aac_fib_alloc(aac);
1671 if (fibctx) {
1672 struct aac_pause *cmd;
1673 int status;
1674
			aac_fib_init(fibctx);
1676
1677 cmd = (struct aac_pause *) fib_data(fibctx);
1678
1679 cmd->command = cpu_to_le32(VM_ContainerConfig);
1680 cmd->type = cpu_to_le32(CT_PAUSE_IO);
1681 cmd->timeout = cpu_to_le32(1);
1682 cmd->min = cpu_to_le32(1);
1683 cmd->noRescan = cpu_to_le32(1);
1684 cmd->count = cpu_to_le32(0);
1685
			status = aac_fib_send(ContainerCommand,
					fibctx,
					sizeof(struct aac_pause),
					FsaNormal,
					-2 /* Timeout silently */, 1,
					NULL, NULL);
1692
1693 if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
1699 }
1700 }
1701
1702 return retval;
1703}
1704
1705int aac_check_health(struct aac_dev * aac)
1706{
1707 int BlinkLED;
1708 unsigned long time_now, flagv = 0;
1709 struct list_head * entry;
1710
1711 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1712 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1713 return 0;
1714
	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1717 return 0; /* OK */
1718 }
1719
1720 aac->in_reset = 1;
1721
1722 /* Fake up an AIF:
1723 * aac_aifcmd.command = AifCmdEventNotify = 1
1724 * aac_aifcmd.seqnum = 0xFFFFFFFF
1725 * aac_aifcmd.data[0] = AifEnExpEvent = 23
1726 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1727 * aac.aifcmd.data[2] = AifHighPriority = 3
1728 * aac.aifcmd.data[3] = BlinkLED
1729 */
1730
1731 time_now = jiffies/HZ;
1732 entry = aac->fib_list.next;
1733
1734 /*
1735 * For each Context that is on the
1736 * fibctxList, make a copy of the
1737 * fib, and then set the event to wake up the
1738 * thread that is waiting for it.
1739 */
1740 while (entry != &aac->fib_list) {
1741 /*
1742 * Extract the fibctx
1743 */
1744 struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1745 struct hw_fib * hw_fib;
1746 struct fib * fib;
1747 /*
1748 * Check if the queue is getting
1749 * backlogged
1750 */
1751 if (fibctx->count > 20) {
1752 /*
1753 * It's *not* jiffies folks,
1754 * but jiffies / HZ, so do not
1755 * panic ...
1756 */
1757 u32 time_last = fibctx->jiffies;
1758 /*
1759 * Has it been > 2 minutes
1760 * since the last read off
1761 * the queue?
1762 */
1763 if ((time_now - time_last) > aif_timeout) {
1764 entry = entry->next;
				aac_close_fib_context(aac, fibctx);
1766 continue;
1767 }
1768 }
1769 /*
1770 * Warning: no sleep allowed while
1771 * holding spinlock
1772 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1775 if (fib && hw_fib) {
1776 struct aac_aifcmd * aif;
1777
1778 fib->hw_fib_va = hw_fib;
1779 fib->dev = aac;
			aac_fib_init(fib);
1781 fib->type = FSAFS_NTC_FIB_CONTEXT;
1782 fib->size = sizeof (struct fib);
1783 fib->data = hw_fib->data;
1784 aif = (struct aac_aifcmd *)hw_fib->data;
1785 aif->command = cpu_to_le32(AifCmdEventNotify);
1786 aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1787 ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1788 ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1789 ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1790 ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1791
1792 /*
1793 * Put the FIB onto the
1794 * fibctx's fibs
1795 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1797 fibctx->count++;
1798 /*
1799 * Set the event to wake up the
			 * thread that is waiting.
1801 */
1802 complete(&fibctx->completion);
1803 } else {
1804 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
1807 }
1808 entry = entry->next;
1809 }
1810
	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1812
1813 if (BlinkLED < 0) {
1814 printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1815 aac->name, BlinkLED);
1816 goto out;
1817 }
1818
1819 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1820
1821out:
1822 aac->in_reset = 0;
1823 return BlinkLED;
1824}
1825
1826static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
1827{
1828 return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
1829}
1830
1831static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
1832 int bus,
1833 int target)
1834{
1835 if (bus != CONTAINER_CHANNEL)
1836 bus = aac_phys_to_logical(bus);
1837
1838 return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
1839}
1840
1841static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
1842{
1843 if (bus != CONTAINER_CHANNEL)
1844 bus = aac_phys_to_logical(bus);
1845
	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
1847}
1848
1849static void aac_put_safw_scsi_device(struct scsi_device *sdev)
1850{
1851 if (sdev)
1852 scsi_device_put(sdev);
1853}
1854
1855static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
1856{
1857 struct scsi_device *sdev;
1858
1859 sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1860 scsi_remove_device(sdev);
1861 aac_put_safw_scsi_device(sdev);
1862}
1863
1864static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
1865 int bus, int target)
1866{
1867 return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
1868}
1869
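/**
 * aac_is_safw_target_valid - decide whether a target should stay exposed
 * @dev: Adapter being scanned
 * @bus: Bus number
 * @target: Target id
 *
 * RAID volumes are checked against the fsa_dev valid flag; physical devices
 * are valid only if they were seen during the current scan pass.
 */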
1870static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
1871{
1872	if (is_safw_raid_volume(dev, bus, target))
1873 return dev->fsa_dev[target].valid;
1874 else
1875 return aac_is_safw_scan_count_equal(dev, bus, target);
1876}
1877
1878static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
1879{
1880 int is_exposed = 0;
1881 struct scsi_device *sdev;
1882
1883 sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1884 if (sdev)
1885 is_exposed = 1;
1886 aac_put_safw_scsi_device(sdev);
1887
1888 return is_exposed;
1889}
1890
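/**
 * aac_update_safw_host_devices - reconcile exposed devices with firmware state
 * @dev: Adapter to rescan
 *
 * Refreshes the adapter's device information, then walks every bus/target
 * slot, adding targets that became valid and removing ones that disappeared.
 */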
1891static int aac_update_safw_host_devices(struct aac_dev *dev)
1892{
1893 int i;
1894 int bus;
1895 int target;
1896 int is_exposed = 0;
1897 int rcode = 0;
1898
1899 rcode = aac_setup_safw_adapter(dev);
1900	if (unlikely(rcode < 0))
1901		goto out;
1903
1904 for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
1905
1906 bus = get_bus_number(i);
1907 target = get_target_number(i);
1908
1909 is_exposed = aac_is_safw_device_exposed(dev, bus, target);
1910
1911 if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
1912 aac_add_safw_device(dev, bus, target);
1913 else if (!aac_is_safw_target_valid(dev, bus, target) &&
1914 is_exposed)
1915 aac_remove_safw_device(dev, bus, target);
1916 }
1917out:
1918 return rcode;
1919}
1920
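/**
 * aac_scan_safw_host - rescan an adapter running SA firmware
 * @dev: Adapter to rescan
 *
 * On failure the scan worker is rescheduled so the update is retried later.
 */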
1921static int aac_scan_safw_host(struct aac_dev *dev)
1922{
1923 int rcode = 0;
1924
1925 rcode = aac_update_safw_host_devices(dev);
1926 if (rcode)
1927 aac_schedule_safw_scan_worker(dev);
1928
1929 return rcode;
1930}
1931
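/**
 * aac_scan_host - scan or rescan the host for devices
 * @dev: Adapter to scan
 *
 * Serialized by scan_mutex. SA firmware adapters use the driver's own
 * reconcile logic; older adapters fall back to scsi_scan_host().
 */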
1932int aac_scan_host(struct aac_dev *dev)
1933{
1934 int rcode = 0;
1935
1936 mutex_lock(&dev->scan_mutex);
1937 if (dev->sa_firmware)
1938 rcode = aac_scan_safw_host(dev);
1939 else
1940 scsi_scan_host(dev->scsi_host_ptr);
1941	mutex_unlock(&dev->scan_mutex);
1942
1943 return rcode;
1944}
1945
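/**
 * aac_src_reinit_aif_worker - delayed work that re-initialises the adapter
 * @work: Embedded work_struct of the owning aac_dev
 *
 * Waits until the SCSI host has left error recovery before asking the
 * adapter to reinitialise.
 */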
1946void aac_src_reinit_aif_worker(struct work_struct *work)
1947{
1948 struct aac_dev *dev = container_of(to_delayed_work(work),
1949 struct aac_dev, src_reinit_aif_worker);
1950
1951 wait_event(dev->scsi_host_ptr->host_wait,
1952 !scsi_host_in_recovery(dev->scsi_host_ptr));
1953	aac_reinit_aif(dev, dev->cardtype);
1954}
1955
1956/**
1957 * aac_handle_sa_aif - Handle a message from the firmware
1958 * @dev: Which adapter this fib is from
1959 * @fibptr: Pointer to fibptr from adapter
1960 *
1961 * This routine handles a driver notify fib from the adapter and
1962 * dispatches it to the appropriate routine for handling.
1963 */
1964static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1965{
1966 int i;
1967 u32 events = 0;
1968
1969 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1970 events = SA_AIF_HOTPLUG;
1971 else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
1972 events = SA_AIF_HARDWARE;
1973 else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
1974 events = SA_AIF_PDEV_CHANGE;
1975 else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
1976 events = SA_AIF_LDEV_CHANGE;
1977 else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
1978 events = SA_AIF_BPSTAT_CHANGE;
1979 else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
1980 events = SA_AIF_BPCFG_CHANGE;
1981
1982 switch (events) {
1983 case SA_AIF_HOTPLUG:
1984 case SA_AIF_HARDWARE:
1985 case SA_AIF_PDEV_CHANGE:
1986 case SA_AIF_LDEV_CHANGE:
1987 case SA_AIF_BPCFG_CHANGE:
1988
1989 aac_scan_host(dev);
1990
1991 break;
1992
1993 case SA_AIF_BPSTAT_CHANGE:
1994 /* currently do nothing */
1995 break;
1996 }
1997
1998 for (i = 1; i <= 10; ++i) {
1999 events = src_readl(dev, MUnit.IDR);
2000 if (events & (1<<23)) {
2001 pr_warn(" AIF not cleared by firmware - %d/%d)\n",
2002 i, 10);
2003			ssleep(1);
2004 }
2005 }
2006}
2007
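/**
 * get_fib_count - estimate how many FIB copies may be needed
 * @dev: Adapter being processed
 *
 * Combines the adapter FIB area size with the number of registered fib
 * contexts so a pool can be pre-allocated outside the spinlock.
 */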
2008static int get_fib_count(struct aac_dev *dev)
2009{
2010 unsigned int num = 0;
2011 struct list_head *entry;
2012 unsigned long flagv;
2013
2014 /*
2015 * Warning: no sleep allowed while
2016 * holding spinlock. We take the estimate
2017 * and pre-allocate a set of fibs outside the
2018 * lock.
2019 */
2020 num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
2021 / sizeof(struct hw_fib); /* some extra */
2022 spin_lock_irqsave(&dev->fib_lock, flagv);
2023 entry = dev->fib_list.next;
2024 while (entry != &dev->fib_list) {
2025 entry = entry->next;
2026 ++num;
2027 }
2028	spin_unlock_irqrestore(&dev->fib_lock, flagv);
2029
2030 return num;
2031}
2032
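/**
 * fillup_pools - pre-allocate FIB and hardware FIB copies
 * @dev: Adapter being processed
 * @hw_fib_pool: Array to fill with hw_fib allocations
 * @fib_pool: Array to fill with fib allocations
 * @num: Requested number of entries
 *
 * Allocation stops at the first failure; the return value is the number
 * of hw_fib/fib pairs actually allocated.
 */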
2033static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2034 struct fib **fib_pool,
2035 unsigned int num)
2036{
2037 struct hw_fib **hw_fib_p;
2038 struct fib **fib_p;
2039
2040 hw_fib_p = hw_fib_pool;
2041 fib_p = fib_pool;
2042 while (hw_fib_p < &hw_fib_pool[num]) {
2043		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
2044 if (!(*(hw_fib_p++))) {
2045 --hw_fib_p;
2046 break;
2047 }
2048
2049		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
2050 if (!(*(fib_p++))) {
2051			kfree(*(--hw_fib_p));
2052 break;
2053 }
2054 }
2055
2056 /*
2057 * Get the actual number of allocated fibs
2058 */
2059 num = hw_fib_p - hw_fib_pool;
2060 return num;
2061}
2062
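/**
 * wakeup_fibctx_threads - hand an AIF copy to every registered fib context
 * @dev: Adapter that produced the AIF
 * @hw_fib_pool: Pre-allocated hardware FIB copies
 * @fib_pool: Pre-allocated FIB copies
 * @fib: Original FIB from the adapter
 * @hw_fib: Original hardware FIB from the adapter
 * @num: Number of pre-allocated entries
 *
 * Each ioctl listener gets its own copy of the FIB and is woken via its
 * completion; backlogged contexts that have not been read within
 * aif_timeout are closed instead.
 */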
2063static void wakeup_fibctx_threads(struct aac_dev *dev,
2064 struct hw_fib **hw_fib_pool,
2065 struct fib **fib_pool,
2066 struct fib *fib,
2067 struct hw_fib *hw_fib,
2068 unsigned int num)
2069{
2070 unsigned long flagv;
2071 struct list_head *entry;
2072 struct hw_fib **hw_fib_p;
2073 struct fib **fib_p;
2074 u32 time_now, time_last;
2075 struct hw_fib *hw_newfib;
2076 struct fib *newfib;
2077 struct aac_fib_context *fibctx;
2078
2079 time_now = jiffies/HZ;
2080 spin_lock_irqsave(&dev->fib_lock, flagv);
2081 entry = dev->fib_list.next;
2082 /*
2083 * For each Context that is on the
2084 * fibctxList, make a copy of the
2085 * fib, and then set the event to wake up the
2086 * thread that is waiting for it.
2087 */
2088
2089 hw_fib_p = hw_fib_pool;
2090 fib_p = fib_pool;
2091 while (entry != &dev->fib_list) {
2092 /*
2093 * Extract the fibctx
2094 */
2095 fibctx = list_entry(entry, struct aac_fib_context,
2096 next);
2097 /*
2098 * Check if the queue is getting
2099 * backlogged
2100 */
2101 if (fibctx->count > 20) {
2102 /*
2103 * It's *not* jiffies folks,
2104 * but jiffies / HZ so do not
2105 * panic ...
2106 */
2107 time_last = fibctx->jiffies;
2108 /*
2109 * Has it been > 2 minutes
2110 * since the last read off
2111 * the queue?
2112 */
2113 if ((time_now - time_last) > aif_timeout) {
2114 entry = entry->next;
2115 aac_close_fib_context(dev, fibctx);
2116 continue;
2117 }
2118 }
2119 /*
2120 * Warning: no sleep allowed while
2121 * holding spinlock
2122 */
2123 if (hw_fib_p >= &hw_fib_pool[num]) {
2124 pr_warn("aifd: didn't allocate NewFib\n");
2125 entry = entry->next;
2126 continue;
2127 }
2128
2129 hw_newfib = *hw_fib_p;
2130 *(hw_fib_p++) = NULL;
2131 newfib = *fib_p;
2132 *(fib_p++) = NULL;
2133 /*
2134 * Make the copy of the FIB
2135 */
2136 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
2137 memcpy(newfib, fib, sizeof(struct fib));
2138 newfib->hw_fib_va = hw_newfib;
2139 /*
2140 * Put the FIB onto the
2141 * fibctx's fibs
2142 */
2143		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
2144 fibctx->count++;
2145 /*
2146 * Set the event to wake up the
2147 * thread that is waiting.
2148 */
2149 complete(&fibctx->completion);
2150
2151 entry = entry->next;
2152 }
2153 /*
2154 * Set the status of this FIB
2155 */
2156 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2157	aac_fib_adapter_complete(fib, sizeof(u32));
2158	spin_unlock_irqrestore(&dev->fib_lock, flagv);
2159
2160}
2161
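/**
 * aac_process_events - drain the host normal command queue of AIFs
 * @dev: Adapter whose queue is processed
 *
 * Pulls each FIB off HostNormCmdQueue, handles driver notify and event
 * notify AIFs, and distributes copies to any waiting fib contexts.
 */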
2162static void aac_process_events(struct aac_dev *dev)
2163{
2164 struct hw_fib *hw_fib;
2165 struct fib *fib;
2166 unsigned long flags;
2167 spinlock_t *t_lock;
2168
2169 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2170 spin_lock_irqsave(t_lock, flags);
2171
2172	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
2173 struct list_head *entry;
2174 struct aac_aifcmd *aifcmd;
2175 unsigned int num;
2176 struct hw_fib **hw_fib_pool, **hw_fib_p;
2177 struct fib **fib_pool, **fib_p;
2178
2179 set_current_state(TASK_RUNNING);
2180
2181 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
2182 list_del(entry);
2183
2184 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2185		spin_unlock_irqrestore(t_lock, flags);
2186
2187 fib = list_entry(entry, struct fib, fiblink);
2188 hw_fib = fib->hw_fib_va;
2189 if (dev->sa_firmware) {
2190 /* Thor AIF */
2191			aac_handle_sa_aif(dev, fib);
2192			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2193 goto free_fib;
2194 }
2195 /*
2196 * We will process the FIB here or pass it to a
2197		 * worker thread that is TBD. We really can't
2198 * do anything at this point since we don't have
2199 * anything defined for this thread to do.
2200 */
2201 memset(fib, 0, sizeof(struct fib));
2202 fib->type = FSAFS_NTC_FIB_CONTEXT;
2203 fib->size = sizeof(struct fib);
2204 fib->hw_fib_va = hw_fib;
2205 fib->data = hw_fib->data;
2206 fib->dev = dev;
2207 /*
2208 * We only handle AifRequest fibs from the adapter.
2209 */
2210
2211 aifcmd = (struct aac_aifcmd *) hw_fib->data;
2212 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
2213 /* Handle Driver Notify Events */
2214			aac_handle_aif(dev, fib);
2215			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2216			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2217 goto free_fib;
2218 }
2219 /*
2220 * The u32 here is important and intended. We are using
2221 * 32bit wrapping time to fit the adapter field
2222 */
2223
2224 /* Sniff events */
2225 if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
2226 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
2227			aac_handle_aif(dev, fib);
2228 }
2229
2230 /*
2231 * get number of fibs to process
2232 */
2233 num = get_fib_count(dev);
2234 if (!num)
2235 goto free_fib;
2236
2237		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
2238 GFP_KERNEL);
2239 if (!hw_fib_pool)
2240 goto free_fib;
2241
2242		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
2243 if (!fib_pool)
2244 goto free_hw_fib_pool;
2245
2246 /*
2247 * Fill up fib pointer pools with actual fibs
2248 * and hw_fibs
2249 */
2250 num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2251 if (!num)
2252 goto free_mem;
2253
2254 /*
2255 * wakeup the thread that is waiting for
2256 * the response from fw (ioctl)
2257 */
2258 wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
2259 fib, hw_fib, num);
2260
2261free_mem:
2262 /* Free up the remaining resources */
2263 hw_fib_p = hw_fib_pool;
2264 fib_p = fib_pool;
2265 while (hw_fib_p < &hw_fib_pool[num]) {
2266			kfree(*hw_fib_p);
2267			kfree(*fib_p);
2268 ++fib_p;
2269 ++hw_fib_p;
2270 }
2271		kfree(fib_pool);
2272free_hw_fib_pool:
2273		kfree(hw_fib_pool);
2274free_fib:
2275		kfree(fib);
2276 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2277 spin_lock_irqsave(t_lock, flags);
2278 }
2279 /*
2280 * There are no more AIF's
2281 */
2282 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2283	spin_unlock_irqrestore(t_lock, flags);
2284}
2285
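/**
 * aac_send_wellness_command - send a host wellness string to the firmware
 * @dev: Adapter to send to
 * @wellness_str: Formatted wellness buffer
 * @datasize: Size of the buffer in bytes
 *
 * The buffer is copied into a coherent DMA region and pushed to the
 * firmware's virtual device with a BMIC_OUT / WRITE_HOST_WELLNESS
 * ScsiPortCommand64 SRB.
 */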
2286static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
2287 u32 datasize)
2288{
2289 struct aac_srb *srbcmd;
2290 struct sgmap64 *sg64;
2291 dma_addr_t addr;
2292 char *dma_buf;
2293 struct fib *fibptr;
2294 int ret = -ENOMEM;
2295 u32 vbus, vid;
2296
2297 fibptr = aac_fib_alloc(dev);
2298 if (!fibptr)
2299 goto out;
2300
2301	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
2302 GFP_KERNEL);
2303 if (!dma_buf)
2304 goto fib_free_out;
2305
2306 aac_fib_init(fibptr);
2307
2308 vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
2309 vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
2310
2311 srbcmd = (struct aac_srb *)fib_data(fibptr);
2312
2313 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2314 srbcmd->channel = cpu_to_le32(vbus);
2315 srbcmd->id = cpu_to_le32(vid);
2316 srbcmd->lun = 0;
2317 srbcmd->flags = cpu_to_le32(SRB_DataOut);
2318 srbcmd->timeout = cpu_to_le32(10);
2319 srbcmd->retry_limit = 0;
2320 srbcmd->cdb_size = cpu_to_le32(12);
2321 srbcmd->count = cpu_to_le32(datasize);
2322
2323 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2324 srbcmd->cdb[0] = BMIC_OUT;
2325 srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
2326 memcpy(dma_buf, (char *)wellness_str, datasize);
2327
2328 sg64 = (struct sgmap64 *)&srbcmd->sg;
2329 sg64->count = cpu_to_le32(1);
2330 sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2331 sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2332 sg64->sg[0].count = cpu_to_le32(datasize);
2333
2334	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
2335				FsaNormal, 1, 1, NULL, NULL);
2336
2337	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
2338
2339 /*
2340 * Do not set XferState to zero unless
2341 * receives a response from F/W
2342 */
2343 if (ret >= 0)
2344 aac_fib_complete(fibptr);
2345
2346 /*
2347 * FIB should be freed only after
2348 * getting the response from the F/W
2349 */
2350 if (ret != -ERESTARTSYS)
2351 goto fib_free_out;
2352
2353out:
2354 return ret;
2355fib_free_out:
2356 aac_fib_free(fibptr);
2357 goto out;
2358}
2359
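/**
 * aac_send_safw_hostttime - report the host wall clock time to SA firmware
 * @dev: Adapter to update
 * @now: Current host time
 *
 * Encodes the local time as BCD inside a wellness string and sends it with
 * aac_send_wellness_command(). Only valid for sa_firmware adapters.
 */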
2360static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
2361{
2362 struct tm cur_tm;
2363 char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2364 u32 datasize = sizeof(wellness_str);
2365 time64_t local_time;
2366 int ret = -ENODEV;
2367
2368 if (!dev->sa_firmware)
2369 goto out;
2370
2371 local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
2372	time64_to_tm(local_time, 0, &cur_tm);
2373 cur_tm.tm_mon += 1;
2374 cur_tm.tm_year += 1900;
2375 wellness_str[8] = bin2bcd(cur_tm.tm_hour);
2376 wellness_str[9] = bin2bcd(cur_tm.tm_min);
2377 wellness_str[10] = bin2bcd(cur_tm.tm_sec);
2378 wellness_str[12] = bin2bcd(cur_tm.tm_mon);
2379 wellness_str[13] = bin2bcd(cur_tm.tm_mday);
2380 wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
2381 wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
2382
2383 ret = aac_send_wellness_command(dev, wellness_str, datasize);
2384
2385out:
2386 return ret;
2387}
2388
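/**
 * aac_send_hosttime - report the host time to legacy firmware
 * @dev: Adapter to update
 * @now: Current host time
 *
 * Sends the time as 32-bit seconds in a SendHostTime FIB.
 */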
2389static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
2390{
2391 int ret = -ENOMEM;
2392 struct fib *fibptr;
2393 __le32 *info;
2394
2395 fibptr = aac_fib_alloc(dev);
2396 if (!fibptr)
2397 goto out;
2398
2399 aac_fib_init(fibptr);
2400 info = (__le32 *)fib_data(fibptr);
2401 *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
2402	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2403					1, 1, NULL, NULL);
2404
2405 /*
2406 * Do not set XferState to zero unless
2407 * receives a response from F/W
2408 */
2409 if (ret >= 0)
2410 aac_fib_complete(fibptr);
2411
2412 /*
2413 * FIB should be freed only after
2414 * getting the response from the F/W
2415 */
2416 if (ret != -ERESTARTSYS)
2417 aac_fib_free(fibptr);
2418
2419out:
2420 return ret;
2421}
2422
2423/**
2424 * aac_command_thread - command processing thread
2425 * @data: Adapter to monitor
2426 *
2427 * Waits on the commandready event in its queue. When the event gets set
2428 * it will pull FIBs off its queue. It will continue to pull FIBs off
2429 * until the queue is empty. When the queue is empty it will wait for
2430 * more FIBs.
2431 */
2432
2433int aac_command_thread(void *data)
2434{
2435 struct aac_dev *dev = data;
2436 DECLARE_WAITQUEUE(wait, current);
2437 unsigned long next_jiffies = jiffies + HZ;
2438 unsigned long next_check_jiffies = next_jiffies;
2439 long difference = HZ;
2440
2441 /*
2442 * We can only have one thread per adapter for AIF's.
2443 */
2444 if (dev->aif_thread)
2445 return -EINVAL;
2446
2447 /*
2448 * Let the DPC know it has a place to send the AIF's to.
2449 */
2450 dev->aif_thread = 1;
2451	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2452 set_current_state(TASK_INTERRUPTIBLE);
2453 dprintk ((KERN_INFO "aac_command_thread start\n"));
2454 while (1) {
2455
2456 aac_process_events(dev);
2457
2458 /*
2459 * Background activity
2460 */
2461 if ((time_before(next_check_jiffies,next_jiffies))
2462 && ((difference = next_check_jiffies - jiffies) <= 0)) {
2463 next_check_jiffies = next_jiffies;
2464 if (aac_adapter_check_health(dev) == 0) {
2465 difference = ((long)(unsigned)check_interval)
2466 * HZ;
2467 next_check_jiffies = jiffies + difference;
2468 } else if (!dev->queues)
2469 break;
2470 }
2471 if (!time_before(next_check_jiffies,next_jiffies)
2472 && ((difference = next_jiffies - jiffies) <= 0)) {
2473 struct timespec64 now;
2474 int ret;
2475
2476			/* Don't even try to talk to adapter if it's sick */
2477 ret = aac_adapter_check_health(dev);
2478 if (ret || !dev->queues)
2479 break;
2480 next_check_jiffies = jiffies
2481 + ((long)(unsigned)check_interval)
2482 * HZ;
2483			ktime_get_real_ts64(&now);
2484
2485 /* Synchronize our watches */
2486 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2487 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2488 difference = HZ + HZ / 2 -
2489 now.tv_nsec / (NSEC_PER_SEC / HZ);
2490 else {
2491 if (now.tv_nsec > NSEC_PER_SEC / 2)
2492 ++now.tv_sec;
2493
2494 if (dev->sa_firmware)
2495 ret =
2496					aac_send_safw_hostttime(dev, &now);
2497				else
2498					ret = aac_send_hosttime(dev, &now);
2499
2500 difference = (long)(unsigned)update_interval*HZ;
2501 }
2502 next_jiffies = jiffies + difference;
2503 if (time_before(next_check_jiffies,next_jiffies))
2504 difference = next_check_jiffies - jiffies;
2505 }
2506 if (difference <= 0)
2507 difference = 1;
2508 set_current_state(TASK_INTERRUPTIBLE);
2509
2510 if (kthread_should_stop())
2511 break;
2512
2513 /*
2514 * we probably want usleep_range() here instead of the
2515 * jiffies computation
2516 */
2517		schedule_timeout(difference);
2518
2519 if (kthread_should_stop())
2520 break;
2521 }
2522 if (dev->queues)
2523		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2524 dev->aif_thread = 0;
2525 return 0;
2526}
2527
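/**
 * aac_acquire_irq - register interrupt handlers for the adapter
 * @dev: Adapter to wire up
 *
 * Requests one IRQ per MSI-X vector when MSI-X is active, otherwise a
 * single (possibly shared) interrupt line. Returns 0 on success or -1
 * on failure.
 */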
2528int aac_acquire_irq(struct aac_dev *dev)
2529{
2530 int i;
2531 int j;
2532 int ret = 0;
2533
2534 if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
2535 for (i = 0; i < dev->max_msix; i++) {
2536 dev->aac_msix[i].vector_no = i;
2537 dev->aac_msix[i].dev = dev;
2538			if (request_irq(pci_irq_vector(dev->pdev, i),
2539					dev->a_ops.adapter_intr,
2540					0, "aacraid", &(dev->aac_msix[i]))) {
2541 printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
2542 dev->name, dev->id, i);
2543 for (j = 0 ; j < i ; j++)
2544					free_irq(pci_irq_vector(dev->pdev, j),
2545						 &(dev->aac_msix[j]));
2546				pci_disable_msix(dev->pdev);
2547 ret = -1;
2548 }
2549 }
2550 } else {
2551 dev->aac_msix[0].vector_no = 0;
2552 dev->aac_msix[0].dev = dev;
2553
2554		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
2555				IRQF_SHARED, "aacraid",
2556				&(dev->aac_msix[0])) < 0) {
2557 if (dev->msi)
2558				pci_disable_msi(dev->pdev);
2559 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
2560 dev->name, dev->id);
2561 ret = -1;
2562 }
2563 }
2564 return ret;
2565}
2566
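/**
 * aac_free_irq - release the adapter's interrupt handlers
 * @dev: Adapter to tear down
 *
 * Frees the registered interrupt(s) and disables MSI or MSI-X when they
 * were enabled.
 */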
2567void aac_free_irq(struct aac_dev *dev)
2568{
2569 int i;
2570
2571 if (aac_is_src(dev)) {
2572 if (dev->max_msix > 1) {
2573 for (i = 0; i < dev->max_msix; i++)
2574				free_irq(pci_irq_vector(dev->pdev, i),
2575 &(dev->aac_msix[i]));
2576 } else {
2577 free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2578 }
2579 } else {
2580 free_irq(dev->pdev->irq, dev);
2581 }
2582 if (dev->msi)
2583		pci_disable_msi(dev->pdev);
2584	else if (dev->max_msix > 1)
2585		pci_disable_msix(dev->pdev);
2586}
2587
