1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Adaptec AAC series RAID controller driver |
4 | * (c) Copyright 2001 Red Hat Inc. |
5 | * |
6 | * based on the old aacraid driver that is.. |
7 | * Adaptec aacraid device driver for Linux. |
8 | * |
9 | * Copyright (c) 2000-2010 Adaptec, Inc. |
10 | * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) |
11 | * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) |
12 | * |
13 | * Module Name: |
14 | * src.c |
15 | * |
16 | * Abstract: Hardware Device Interface for PMC SRC based controllers |
17 | */ |
18 | |
19 | #include <linux/kernel.h> |
20 | #include <linux/init.h> |
21 | #include <linux/types.h> |
22 | #include <linux/pci.h> |
23 | #include <linux/spinlock.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/blkdev.h> |
26 | #include <linux/delay.h> |
27 | #include <linux/completion.h> |
28 | #include <linux/time.h> |
29 | #include <linux/interrupt.h> |
30 | #include <scsi/scsi_host.h> |
31 | |
32 | #include "aacraid.h" |
33 | |
34 | static int aac_src_get_sync_status(struct aac_dev *dev); |
35 | |
36 | static irqreturn_t aac_src_intr_message(int irq, void *dev_id) |
37 | { |
38 | struct aac_msix_ctx *ctx; |
39 | struct aac_dev *dev; |
40 | unsigned long bellbits, bellbits_shifted; |
41 | int vector_no; |
42 | int isFastResponse, mode; |
43 | u32 index, handle; |
44 | |
45 | ctx = (struct aac_msix_ctx *)dev_id; |
46 | dev = ctx->dev; |
47 | vector_no = ctx->vector_no; |
48 | |
49 | if (dev->msi_enabled) { |
50 | mode = AAC_INT_MODE_MSI; |
51 | if (vector_no == 0) { |
52 | bellbits = src_readl(dev, MUnit.ODR_MSI); |
53 | if (bellbits & 0x40000) |
54 | mode |= AAC_INT_MODE_AIF; |
55 | if (bellbits & 0x1000) |
56 | mode |= AAC_INT_MODE_SYNC; |
57 | } |
58 | } else { |
59 | mode = AAC_INT_MODE_INTX; |
60 | bellbits = src_readl(dev, MUnit.ODR_R); |
61 | if (bellbits & PmDoorBellResponseSent) { |
62 | bellbits = PmDoorBellResponseSent; |
63 | src_writel(dev, MUnit.ODR_C, bellbits); |
64 | src_readl(dev, MUnit.ODR_C); |
65 | } else { |
66 | bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); |
67 | src_writel(dev, MUnit.ODR_C, bellbits); |
68 | src_readl(dev, MUnit.ODR_C); |
69 | |
70 | if (bellbits_shifted & DoorBellAifPending) |
71 | mode |= AAC_INT_MODE_AIF; |
72 | else if (bellbits_shifted & OUTBOUNDDOORBELL_0) |
73 | mode |= AAC_INT_MODE_SYNC; |
74 | } |
75 | } |
76 | |
77 | if (mode & AAC_INT_MODE_SYNC) { |
78 | unsigned long sflags; |
79 | struct list_head *entry; |
80 | int send_it = 0; |
81 | extern int aac_sync_mode; |
82 | |
83 | if (!aac_sync_mode && !dev->msi_enabled) { |
84 | src_writel(dev, MUnit.ODR_C, bellbits); |
85 | src_readl(dev, MUnit.ODR_C); |
86 | } |
87 | |
88 | if (dev->sync_fib) { |
89 | if (dev->sync_fib->callback) |
90 | dev->sync_fib->callback(dev->sync_fib->callback_data, |
91 | dev->sync_fib); |
92 | spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); |
93 | if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { |
94 | dev->management_fib_count--; |
95 | complete(&dev->sync_fib->event_wait); |
96 | } |
97 | spin_unlock_irqrestore(lock: &dev->sync_fib->event_lock, |
98 | flags: sflags); |
99 | spin_lock_irqsave(&dev->sync_lock, sflags); |
100 | if (!list_empty(head: &dev->sync_fib_list)) { |
101 | entry = dev->sync_fib_list.next; |
102 | dev->sync_fib = list_entry(entry, |
103 | struct fib, |
104 | fiblink); |
105 | list_del(entry); |
106 | send_it = 1; |
107 | } else { |
108 | dev->sync_fib = NULL; |
109 | } |
110 | spin_unlock_irqrestore(lock: &dev->sync_lock, flags: sflags); |
111 | if (send_it) { |
112 | aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, |
113 | (u32)dev->sync_fib->hw_fib_pa, |
114 | 0, 0, 0, 0, 0, |
115 | NULL, NULL, NULL, NULL, NULL); |
116 | } |
117 | } |
118 | if (!dev->msi_enabled) |
119 | mode = 0; |
120 | |
121 | } |
122 | |
123 | if (mode & AAC_INT_MODE_AIF) { |
124 | /* handle AIF */ |
125 | if (dev->sa_firmware) { |
126 | u32 events = src_readl(dev, MUnit.SCR0); |
127 | |
128 | aac_intr_normal(dev, Index: events, isAif: 1, isFastResponse: 0, NULL); |
129 | writel(val: events, addr: &dev->IndexRegs->Mailbox[0]); |
130 | src_writel(dev, MUnit.IDR, 1 << 23); |
131 | } else { |
132 | if (dev->aif_thread && dev->fsa_dev) |
133 | aac_intr_normal(dev, Index: 0, isAif: 2, isFastResponse: 0, NULL); |
134 | } |
135 | if (dev->msi_enabled) |
136 | aac_src_access_devreg(dev, mode: AAC_CLEAR_AIF_BIT); |
137 | mode = 0; |
138 | } |
139 | |
140 | if (mode) { |
141 | index = dev->host_rrq_idx[vector_no]; |
142 | |
143 | for (;;) { |
144 | isFastResponse = 0; |
145 | /* remove toggle bit (31) */ |
146 | handle = le32_to_cpu((dev->host_rrq[index]) |
147 | & 0x7fffffff); |
148 | /* check fast response bits (30, 1) */ |
149 | if (handle & 0x40000000) |
150 | isFastResponse = 1; |
151 | handle &= 0x0000ffff; |
152 | if (handle == 0) |
153 | break; |
154 | handle >>= 2; |
155 | if (dev->msi_enabled && dev->max_msix > 1) |
156 | atomic_dec(v: &dev->rrq_outstanding[vector_no]); |
157 | aac_intr_normal(dev, Index: handle, isAif: 0, isFastResponse, NULL); |
158 | dev->host_rrq[index++] = 0; |
159 | if (index == (vector_no + 1) * dev->vector_cap) |
160 | index = vector_no * dev->vector_cap; |
161 | dev->host_rrq_idx[vector_no] = index; |
162 | } |
163 | mode = 0; |
164 | } |
165 | |
166 | return IRQ_HANDLED; |
167 | } |
168 | |
169 | /** |
170 | * aac_src_disable_interrupt - Disable interrupts |
171 | * @dev: Adapter |
172 | */ |
173 | |
174 | static void aac_src_disable_interrupt(struct aac_dev *dev) |
175 | { |
176 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); |
177 | } |
178 | |
179 | /** |
180 | * aac_src_enable_interrupt_message - Enable interrupts |
181 | * @dev: Adapter |
182 | */ |
183 | |
184 | static void aac_src_enable_interrupt_message(struct aac_dev *dev) |
185 | { |
186 | aac_src_access_devreg(dev, mode: AAC_ENABLE_INTERRUPT); |
187 | } |
188 | |
189 | /** |
190 | * src_sync_cmd - send a command and wait |
191 | * @dev: Adapter |
192 | * @command: Command to execute |
193 | * @p1: first parameter |
194 | * @p2: second parameter |
195 | * @p3: third parameter |
196 | * @p4: forth parameter |
197 | * @p5: fifth parameter |
198 | * @p6: sixth parameter |
199 | * @status: adapter status |
200 | * @r1: first return value |
201 | * @r2: second return valu |
202 | * @r3: third return value |
203 | * @r4: forth return value |
204 | * |
205 | * This routine will send a synchronous command to the adapter and wait |
206 | * for its completion. |
207 | */ |
208 | |
209 | static int src_sync_cmd(struct aac_dev *dev, u32 command, |
210 | u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, |
211 | u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) |
212 | { |
213 | unsigned long start; |
214 | unsigned long delay; |
215 | int ok; |
216 | |
217 | /* |
218 | * Write the command into Mailbox 0 |
219 | */ |
220 | writel(val: command, addr: &dev->IndexRegs->Mailbox[0]); |
221 | /* |
222 | * Write the parameters into Mailboxes 1 - 6 |
223 | */ |
224 | writel(val: p1, addr: &dev->IndexRegs->Mailbox[1]); |
225 | writel(val: p2, addr: &dev->IndexRegs->Mailbox[2]); |
226 | writel(val: p3, addr: &dev->IndexRegs->Mailbox[3]); |
227 | writel(val: p4, addr: &dev->IndexRegs->Mailbox[4]); |
228 | |
229 | /* |
230 | * Clear the synch command doorbell to start on a clean slate. |
231 | */ |
232 | if (!dev->msi_enabled) |
233 | src_writel(dev, |
234 | MUnit.ODR_C, |
235 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); |
236 | |
237 | /* |
238 | * Disable doorbell interrupts |
239 | */ |
240 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); |
241 | |
242 | /* |
243 | * Force the completion of the mask register write before issuing |
244 | * the interrupt. |
245 | */ |
246 | src_readl(dev, MUnit.OIMR); |
247 | |
248 | /* |
249 | * Signal that there is a new synch command |
250 | */ |
251 | src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); |
252 | |
253 | if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) && |
254 | !dev->in_soft_reset) { |
255 | ok = 0; |
256 | start = jiffies; |
257 | |
258 | if (command == IOP_RESET_ALWAYS) { |
259 | /* Wait up to 10 sec */ |
260 | delay = 10*HZ; |
261 | } else { |
262 | /* Wait up to 5 minutes */ |
263 | delay = 300*HZ; |
264 | } |
265 | while (time_before(jiffies, start+delay)) { |
266 | udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ |
267 | /* |
268 | * Mon960 will set doorbell0 bit when it has completed the command. |
269 | */ |
270 | if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { |
271 | /* |
272 | * Clear the doorbell. |
273 | */ |
274 | if (dev->msi_enabled) |
275 | aac_src_access_devreg(dev, |
276 | mode: AAC_CLEAR_SYNC_BIT); |
277 | else |
278 | src_writel(dev, |
279 | MUnit.ODR_C, |
280 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); |
281 | ok = 1; |
282 | break; |
283 | } |
284 | /* |
285 | * Yield the processor in case we are slow |
286 | */ |
287 | msleep(msecs: 1); |
288 | } |
289 | if (unlikely(ok != 1)) { |
290 | /* |
291 | * Restore interrupt mask even though we timed out |
292 | */ |
293 | aac_adapter_enable_int(dev); |
294 | return -ETIMEDOUT; |
295 | } |
296 | /* |
297 | * Pull the synch status from Mailbox 0. |
298 | */ |
299 | if (status) |
300 | *status = readl(addr: &dev->IndexRegs->Mailbox[0]); |
301 | if (r1) |
302 | *r1 = readl(addr: &dev->IndexRegs->Mailbox[1]); |
303 | if (r2) |
304 | *r2 = readl(addr: &dev->IndexRegs->Mailbox[2]); |
305 | if (r3) |
306 | *r3 = readl(addr: &dev->IndexRegs->Mailbox[3]); |
307 | if (r4) |
308 | *r4 = readl(addr: &dev->IndexRegs->Mailbox[4]); |
309 | if (command == GET_COMM_PREFERRED_SETTINGS) |
310 | dev->max_msix = |
311 | readl(addr: &dev->IndexRegs->Mailbox[5]) & 0xFFFF; |
312 | /* |
313 | * Clear the synch command doorbell. |
314 | */ |
315 | if (!dev->msi_enabled) |
316 | src_writel(dev, |
317 | MUnit.ODR_C, |
318 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); |
319 | } |
320 | |
321 | /* |
322 | * Restore interrupt mask |
323 | */ |
324 | aac_adapter_enable_int(dev); |
325 | return 0; |
326 | } |
327 | |
328 | /** |
329 | * aac_src_interrupt_adapter - interrupt adapter |
330 | * @dev: Adapter |
331 | * |
332 | * Send an interrupt to the i960 and breakpoint it. |
333 | */ |
334 | |
335 | static void aac_src_interrupt_adapter(struct aac_dev *dev) |
336 | { |
337 | src_sync_cmd(dev, BREAKPOINT_REQUEST, |
338 | p1: 0, p2: 0, p3: 0, p4: 0, p5: 0, p6: 0, |
339 | NULL, NULL, NULL, NULL, NULL); |
340 | } |
341 | |
342 | /** |
343 | * aac_src_notify_adapter - send an event to the adapter |
344 | * @dev: Adapter |
345 | * @event: Event to send |
346 | * |
347 | * Notify the i960 that something it probably cares about has |
348 | * happened. |
349 | */ |
350 | |
351 | static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) |
352 | { |
353 | switch (event) { |
354 | |
355 | case AdapNormCmdQue: |
356 | src_writel(dev, MUnit.ODR_C, |
357 | INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); |
358 | break; |
359 | case HostNormRespNotFull: |
360 | src_writel(dev, MUnit.ODR_C, |
361 | INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); |
362 | break; |
363 | case AdapNormRespQue: |
364 | src_writel(dev, MUnit.ODR_C, |
365 | INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); |
366 | break; |
367 | case HostNormCmdNotFull: |
368 | src_writel(dev, MUnit.ODR_C, |
369 | INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); |
370 | break; |
371 | case FastIo: |
372 | src_writel(dev, MUnit.ODR_C, |
373 | INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); |
374 | break; |
375 | case AdapPrintfDone: |
376 | src_writel(dev, MUnit.ODR_C, |
377 | INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); |
378 | break; |
379 | default: |
380 | BUG(); |
381 | break; |
382 | } |
383 | } |
384 | |
385 | /** |
386 | * aac_src_start_adapter - activate adapter |
387 | * @dev: Adapter |
388 | * |
389 | * Start up processing on an i960 based AAC adapter |
390 | */ |
391 | |
392 | static void aac_src_start_adapter(struct aac_dev *dev) |
393 | { |
394 | union aac_init *init; |
395 | int i; |
396 | |
397 | /* reset host_rrq_idx first */ |
398 | for (i = 0; i < dev->max_msix; i++) { |
399 | dev->host_rrq_idx[i] = i * dev->vector_cap; |
400 | atomic_set(v: &dev->rrq_outstanding[i], i: 0); |
401 | } |
402 | atomic_set(v: &dev->msix_counter, i: 0); |
403 | dev->fibs_pushed_no = 0; |
404 | |
405 | init = dev->init; |
406 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { |
407 | init->r8.host_elapsed_seconds = |
408 | cpu_to_le32(ktime_get_real_seconds()); |
409 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, |
410 | lower_32_bits(dev->init_pa), |
411 | upper_32_bits(dev->init_pa), |
412 | p3: sizeof(struct _r8) + |
413 | (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), |
414 | p4: 0, p5: 0, p6: 0, NULL, NULL, NULL, NULL, NULL); |
415 | } else { |
416 | init->r7.host_elapsed_seconds = |
417 | cpu_to_le32(ktime_get_real_seconds()); |
418 | // We can only use a 32 bit address here |
419 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, |
420 | p1: (u32)(ulong)dev->init_pa, p2: 0, p3: 0, p4: 0, p5: 0, p6: 0, |
421 | NULL, NULL, NULL, NULL, NULL); |
422 | } |
423 | |
424 | } |
425 | |
426 | /** |
427 | * aac_src_check_health |
428 | * @dev: device to check if healthy |
429 | * |
430 | * Will attempt to determine if the specified adapter is alive and |
431 | * capable of handling requests, returning 0 if alive. |
432 | */ |
433 | static int aac_src_check_health(struct aac_dev *dev) |
434 | { |
435 | u32 status = src_readl(dev, MUnit.OMR); |
436 | |
437 | /* |
438 | * Check to see if the board panic'd. |
439 | */ |
440 | if (unlikely(status & KERNEL_PANIC)) |
441 | goto err_blink; |
442 | |
443 | /* |
444 | * Check to see if the board failed any self tests. |
445 | */ |
446 | if (unlikely(status & SELF_TEST_FAILED)) |
447 | goto err_out; |
448 | |
449 | /* |
450 | * Check to see if the board failed any self tests. |
451 | */ |
452 | if (unlikely(status & MONITOR_PANIC)) |
453 | goto err_out; |
454 | |
455 | /* |
456 | * Wait for the adapter to be up and running. |
457 | */ |
458 | if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) |
459 | return -3; |
460 | /* |
461 | * Everything is OK |
462 | */ |
463 | return 0; |
464 | |
465 | err_out: |
466 | return -1; |
467 | |
468 | err_blink: |
469 | return (status >> 16) & 0xFF; |
470 | } |
471 | |
472 | static inline u32 aac_get_vector(struct aac_dev *dev) |
473 | { |
474 | return atomic_inc_return(v: &dev->msix_counter)%dev->max_msix; |
475 | } |
476 | |
477 | /** |
478 | * aac_src_deliver_message |
479 | * @fib: fib to issue |
480 | * |
481 | * Will send a fib, returning 0 if successful. |
482 | */ |
483 | static int aac_src_deliver_message(struct fib *fib) |
484 | { |
485 | struct aac_dev *dev = fib->dev; |
486 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; |
487 | u32 fibsize; |
488 | dma_addr_t address; |
489 | struct aac_fib_xporthdr *pFibX; |
490 | int native_hba; |
491 | #if !defined(writeq) |
492 | unsigned long flags; |
493 | #endif |
494 | |
495 | u16 vector_no; |
496 | struct scsi_cmnd *scmd; |
497 | u32 blk_tag; |
498 | struct Scsi_Host *shost = dev->scsi_host_ptr; |
499 | struct blk_mq_queue_map *qmap; |
500 | |
501 | atomic_inc(v: &q->numpending); |
502 | |
503 | native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0; |
504 | |
505 | |
506 | if (dev->msi_enabled && dev->max_msix > 1 && |
507 | (native_hba || fib->hw_fib_va->header.Command != AifRequest)) { |
508 | |
509 | if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) |
510 | && dev->sa_firmware) |
511 | vector_no = aac_get_vector(dev); |
512 | else { |
513 | if (!fib->vector_no || !fib->callback_data) { |
514 | if (shost && dev->use_map_queue) { |
515 | qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; |
516 | vector_no = qmap->mq_map[raw_smp_processor_id()]; |
517 | } |
518 | /* |
519 | * We hardcode the vector_no for |
520 | * reserved commands as a valid shost is |
521 | * absent during the init |
522 | */ |
523 | else |
524 | vector_no = 0; |
525 | } else { |
526 | scmd = (struct scsi_cmnd *)fib->callback_data; |
527 | blk_tag = blk_mq_unique_tag(rq: scsi_cmd_to_rq(scmd)); |
528 | vector_no = blk_mq_unique_tag_to_hwq(unique_tag: blk_tag); |
529 | } |
530 | } |
531 | |
532 | if (native_hba) { |
533 | if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { |
534 | struct aac_hba_tm_req *tm_req; |
535 | |
536 | tm_req = (struct aac_hba_tm_req *) |
537 | fib->hw_fib_va; |
538 | if (tm_req->iu_type == |
539 | HBA_IU_TYPE_SCSI_TM_REQ) { |
540 | ((struct aac_hba_tm_req *) |
541 | fib->hw_fib_va)->reply_qid |
542 | = vector_no; |
543 | ((struct aac_hba_tm_req *) |
544 | fib->hw_fib_va)->request_id |
545 | += (vector_no << 16); |
546 | } else { |
547 | ((struct aac_hba_reset_req *) |
548 | fib->hw_fib_va)->reply_qid |
549 | = vector_no; |
550 | ((struct aac_hba_reset_req *) |
551 | fib->hw_fib_va)->request_id |
552 | += (vector_no << 16); |
553 | } |
554 | } else { |
555 | ((struct aac_hba_cmd_req *) |
556 | fib->hw_fib_va)->reply_qid |
557 | = vector_no; |
558 | ((struct aac_hba_cmd_req *) |
559 | fib->hw_fib_va)->request_id |
560 | += (vector_no << 16); |
561 | } |
562 | } else { |
563 | fib->hw_fib_va->header.Handle += (vector_no << 16); |
564 | } |
565 | } else { |
566 | vector_no = 0; |
567 | } |
568 | |
569 | atomic_inc(v: &dev->rrq_outstanding[vector_no]); |
570 | |
571 | if (native_hba) { |
572 | address = fib->hw_fib_pa; |
573 | fibsize = (fib->hbacmd_size + 127) / 128 - 1; |
574 | if (fibsize > 31) |
575 | fibsize = 31; |
576 | address |= fibsize; |
577 | #if defined(writeq) |
578 | src_writeq(dev, MUnit.IQN_L, (u64)address); |
579 | #else |
580 | spin_lock_irqsave(&fib->dev->iq_lock, flags); |
581 | src_writel(dev, MUnit.IQN_H, |
582 | upper_32_bits(address) & 0xffffffff); |
583 | src_writel(dev, MUnit.IQN_L, address & 0xffffffff); |
584 | spin_unlock_irqrestore(&fib->dev->iq_lock, flags); |
585 | #endif |
586 | } else { |
587 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || |
588 | dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { |
589 | /* Calculate the amount to the fibsize bits */ |
590 | fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size) |
591 | + 127) / 128 - 1; |
592 | /* New FIB header, 32-bit */ |
593 | address = fib->hw_fib_pa; |
594 | fib->hw_fib_va->header.StructType = FIB_MAGIC2; |
595 | fib->hw_fib_va->header.SenderFibAddress = |
596 | cpu_to_le32((u32)address); |
597 | fib->hw_fib_va->header.u.TimeStamp = 0; |
598 | WARN_ON(upper_32_bits(address) != 0L); |
599 | } else { |
600 | /* Calculate the amount to the fibsize bits */ |
601 | fibsize = (sizeof(struct aac_fib_xporthdr) + |
602 | le16_to_cpu(fib->hw_fib_va->header.Size) |
603 | + 127) / 128 - 1; |
604 | /* Fill XPORT header */ |
605 | pFibX = (struct aac_fib_xporthdr *) |
606 | ((unsigned char *)fib->hw_fib_va - |
607 | sizeof(struct aac_fib_xporthdr)); |
608 | pFibX->Handle = fib->hw_fib_va->header.Handle; |
609 | pFibX->HostAddress = |
610 | cpu_to_le64((u64)fib->hw_fib_pa); |
611 | pFibX->Size = cpu_to_le32( |
612 | le16_to_cpu(fib->hw_fib_va->header.Size)); |
613 | address = fib->hw_fib_pa - |
614 | (u64)sizeof(struct aac_fib_xporthdr); |
615 | } |
616 | if (fibsize > 31) |
617 | fibsize = 31; |
618 | address |= fibsize; |
619 | |
620 | #if defined(writeq) |
621 | src_writeq(dev, MUnit.IQ_L, (u64)address); |
622 | #else |
623 | spin_lock_irqsave(&fib->dev->iq_lock, flags); |
624 | src_writel(dev, MUnit.IQ_H, |
625 | upper_32_bits(address) & 0xffffffff); |
626 | src_writel(dev, MUnit.IQ_L, address & 0xffffffff); |
627 | spin_unlock_irqrestore(&fib->dev->iq_lock, flags); |
628 | #endif |
629 | } |
630 | return 0; |
631 | } |
632 | |
633 | /** |
634 | * aac_src_ioremap |
635 | * @dev: device ioremap |
636 | * @size: mapping resize request |
637 | * |
638 | */ |
639 | static int aac_src_ioremap(struct aac_dev *dev, u32 size) |
640 | { |
641 | if (!size) { |
642 | iounmap(addr: dev->regs.src.bar1); |
643 | dev->regs.src.bar1 = NULL; |
644 | iounmap(addr: dev->regs.src.bar0); |
645 | dev->base = dev->regs.src.bar0 = NULL; |
646 | return 0; |
647 | } |
648 | dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), |
649 | AAC_MIN_SRC_BAR1_SIZE); |
650 | dev->base = NULL; |
651 | if (dev->regs.src.bar1 == NULL) |
652 | return -1; |
653 | dev->base = dev->regs.src.bar0 = ioremap(offset: dev->base_start, size); |
654 | if (dev->base == NULL) { |
655 | iounmap(addr: dev->regs.src.bar1); |
656 | dev->regs.src.bar1 = NULL; |
657 | return -1; |
658 | } |
659 | dev->IndexRegs = &((struct src_registers __iomem *) |
660 | dev->base)->u.tupelo.IndexRegs; |
661 | return 0; |
662 | } |
663 | |
664 | /** |
665 | * aac_srcv_ioremap |
666 | * @dev: device ioremap |
667 | * @size: mapping resize request |
668 | * |
669 | */ |
670 | static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) |
671 | { |
672 | if (!size) { |
673 | iounmap(addr: dev->regs.src.bar0); |
674 | dev->base = dev->regs.src.bar0 = NULL; |
675 | return 0; |
676 | } |
677 | |
678 | dev->regs.src.bar1 = |
679 | ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE); |
680 | dev->base = NULL; |
681 | if (dev->regs.src.bar1 == NULL) |
682 | return -1; |
683 | dev->base = dev->regs.src.bar0 = ioremap(offset: dev->base_start, size); |
684 | if (dev->base == NULL) { |
685 | iounmap(addr: dev->regs.src.bar1); |
686 | dev->regs.src.bar1 = NULL; |
687 | return -1; |
688 | } |
689 | dev->IndexRegs = &((struct src_registers __iomem *) |
690 | dev->base)->u.denali.IndexRegs; |
691 | return 0; |
692 | } |
693 | |
694 | void aac_set_intx_mode(struct aac_dev *dev) |
695 | { |
696 | if (dev->msi_enabled) { |
697 | aac_src_access_devreg(dev, mode: AAC_ENABLE_INTX); |
698 | dev->msi_enabled = 0; |
699 | msleep(msecs: 5000); /* Delay 5 seconds */ |
700 | } |
701 | } |
702 | |
703 | static void aac_clear_omr(struct aac_dev *dev) |
704 | { |
705 | u32 omr_value = 0; |
706 | |
707 | omr_value = src_readl(dev, MUnit.OMR); |
708 | |
709 | /* |
710 | * Check for PCI Errors or Kernel Panic |
711 | */ |
712 | if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC)) |
713 | omr_value = 0; |
714 | |
715 | /* |
716 | * Preserve MSIX Value if any |
717 | */ |
718 | src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX); |
719 | src_readl(dev, MUnit.OMR); |
720 | } |
721 | |
722 | static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) |
723 | { |
724 | __le32 supported_options3; |
725 | |
726 | if (!aac_fib_dump) |
727 | return; |
728 | |
729 | supported_options3 = dev->supplement_adapter_info.supported_options3; |
730 | if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP)) |
731 | return; |
732 | |
733 | aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP, |
734 | 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); |
735 | } |
736 | |
/*
 * aac_is_ctrl_up_and_running - poll the OMR until the firmware reports
 * KERNEL_UP_AND_RUNNING, giving up after SOFT_RESET_TIME seconds of no
 * progress.  Returns true if the controller came up, false on timeout.
 */
static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		/* all-ones usually means the device is off the bus */
		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			/* firmware still booting: restart the timeout window */
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}
767 | |
768 | static void aac_src_drop_io(struct aac_dev *dev) |
769 | { |
770 | if (!dev->soft_reset_support) |
771 | return; |
772 | |
773 | aac_adapter_sync_cmd(dev, DROP_IO, |
774 | 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); |
775 | } |
776 | |
777 | static void aac_notify_fw_of_iop_reset(struct aac_dev *dev) |
778 | { |
779 | aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, |
780 | NULL, NULL, NULL, NULL); |
781 | aac_src_drop_io(dev); |
782 | } |
783 | |
784 | static void aac_send_iop_reset(struct aac_dev *dev) |
785 | { |
786 | aac_dump_fw_fib_iop_reset(dev); |
787 | |
788 | aac_notify_fw_of_iop_reset(dev); |
789 | |
790 | aac_set_intx_mode(dev); |
791 | |
792 | aac_clear_omr(dev); |
793 | |
794 | src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); |
795 | |
796 | msleep(msecs: 5000); |
797 | } |
798 | |
799 | static void aac_send_hardware_soft_reset(struct aac_dev *dev) |
800 | { |
801 | u_int32_t val; |
802 | |
803 | aac_clear_omr(dev); |
804 | val = readl(addr: ((char *)(dev->base) + IBW_SWR_OFFSET)); |
805 | val |= 0x01; |
806 | writel(val, addr: ((char *)(dev->base) + IBW_SWR_OFFSET)); |
807 | msleep_interruptible(msecs: 20000); |
808 | } |
809 | |
/*
 * aac_src_restart_adapter - recover the controller after a fault.
 * @bled:       BlinkLED/health code; < 0 means only report current state
 * @reset_type: bitmask of HW_IOP_RESET and/or HW_SOFT_RESET to attempt
 *
 * Tries an IOP reset first (when requested and not ruled out by the
 * BlinkLED state), then falls back to a hardware soft reset on
 * sa_firmware adapters.  Returns 0 on success, -ENODEV on failure.
 */
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n" , bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has not effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	/* NOTE(review): enable_int is pointed at the disable routine here,
	 * apparently to keep interrupts masked during the reset — confirm */
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n" , reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n" );
		aac_send_iop_reset(dev);

		/*
		 * Creates a delay or wait till up and running comes thru
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n" );
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n" );
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		/* no soft-reset fallback on non-sa_firmware adapters */
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n" );
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n" );
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n" );
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n" );
	}

set_startup:
	/* give the freshly reset firmware at least 300s to come up */
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}
879 | |
880 | /** |
881 | * aac_src_select_comm - Select communications method |
882 | * @dev: Adapter |
883 | * @comm: communications method |
884 | */ |
885 | static int aac_src_select_comm(struct aac_dev *dev, int comm) |
886 | { |
887 | switch (comm) { |
888 | case AAC_COMM_MESSAGE: |
889 | dev->a_ops.adapter_intr = aac_src_intr_message; |
890 | dev->a_ops.adapter_deliver = aac_src_deliver_message; |
891 | break; |
892 | default: |
893 | return 1; |
894 | } |
895 | return 0; |
896 | } |
897 | |
898 | /** |
899 | * aac_src_init - initialize an Cardinal Frey Bar card |
900 | * @dev: device to configure |
901 | * |
902 | */ |
903 | |
904 | int aac_src_init(struct aac_dev *dev) |
905 | { |
906 | unsigned long start; |
907 | unsigned long status; |
908 | int restart = 0; |
909 | int instance = dev->id; |
910 | const char *name = dev->name; |
911 | |
912 | dev->a_ops.adapter_ioremap = aac_src_ioremap; |
913 | dev->a_ops.adapter_comm = aac_src_select_comm; |
914 | |
915 | dev->base_size = AAC_MIN_SRC_BAR0_SIZE; |
916 | if (aac_adapter_ioremap(dev, dev->base_size)) { |
917 | printk(KERN_WARNING "%s: unable to map adapter.\n" , name); |
918 | goto error_iounmap; |
919 | } |
920 | |
921 | /* Failure to reset here is an option ... */ |
922 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; |
923 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; |
924 | |
925 | if (dev->init_reset) { |
926 | dev->init_reset = false; |
927 | if (!aac_src_restart_adapter(dev, bled: 0, IOP_HWSOFT_RESET)) |
928 | ++restart; |
929 | } |
930 | |
931 | /* |
932 | * Check to see if the board panic'd while booting. |
933 | */ |
934 | status = src_readl(dev, MUnit.OMR); |
935 | if (status & KERNEL_PANIC) { |
936 | if (aac_src_restart_adapter(dev, |
937 | bled: aac_src_check_health(dev), IOP_HWSOFT_RESET)) |
938 | goto error_iounmap; |
939 | ++restart; |
940 | } |
941 | /* |
942 | * Check to see if the board failed any self tests. |
943 | */ |
944 | status = src_readl(dev, MUnit.OMR); |
945 | if (status & SELF_TEST_FAILED) { |
946 | printk(KERN_ERR "%s%d: adapter self-test failed.\n" , |
947 | dev->name, instance); |
948 | goto error_iounmap; |
949 | } |
950 | /* |
951 | * Check to see if the monitor panic'd while booting. |
952 | */ |
953 | if (status & MONITOR_PANIC) { |
954 | printk(KERN_ERR "%s%d: adapter monitor panic.\n" , |
955 | dev->name, instance); |
956 | goto error_iounmap; |
957 | } |
958 | start = jiffies; |
959 | /* |
960 | * Wait for the adapter to be up and running. Wait up to 3 minutes |
961 | */ |
962 | while (!((status = src_readl(dev, MUnit.OMR)) & |
963 | KERNEL_UP_AND_RUNNING)) { |
964 | if ((restart && |
965 | (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || |
966 | time_after(jiffies, start+HZ*startup_timeout)) { |
967 | printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n" , |
968 | dev->name, instance, status); |
969 | goto error_iounmap; |
970 | } |
971 | if (!restart && |
972 | ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || |
973 | time_after(jiffies, start + HZ * |
974 | ((startup_timeout > 60) |
975 | ? (startup_timeout - 60) |
976 | : (startup_timeout / 2))))) { |
977 | if (likely(!aac_src_restart_adapter(dev, |
978 | aac_src_check_health(dev), IOP_HWSOFT_RESET))) |
979 | start = jiffies; |
980 | ++restart; |
981 | } |
982 | msleep(msecs: 1); |
983 | } |
984 | if (restart && aac_commit) |
985 | aac_commit = 1; |
986 | /* |
987 | * Fill in the common function dispatch table. |
988 | */ |
989 | dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; |
990 | dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; |
991 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; |
992 | dev->a_ops.adapter_notify = aac_src_notify_adapter; |
993 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; |
994 | dev->a_ops.adapter_check_health = aac_src_check_health; |
995 | dev->a_ops.adapter_restart = aac_src_restart_adapter; |
996 | dev->a_ops.adapter_start = aac_src_start_adapter; |
997 | |
998 | /* |
999 | * First clear out all interrupts. Then enable the one's that we |
1000 | * can handle. |
1001 | */ |
1002 | aac_adapter_comm(dev, AAC_COMM_MESSAGE); |
1003 | aac_adapter_disable_int(dev); |
1004 | src_writel(dev, MUnit.ODR_C, 0xffffffff); |
1005 | aac_adapter_enable_int(dev); |
1006 | |
1007 | if (aac_init_adapter(dev) == NULL) |
1008 | goto error_iounmap; |
1009 | if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) |
1010 | goto error_iounmap; |
1011 | |
1012 | dev->msi = !pci_enable_msi(dev: dev->pdev); |
1013 | |
1014 | dev->aac_msix[0].vector_no = 0; |
1015 | dev->aac_msix[0].dev = dev; |
1016 | |
1017 | if (request_irq(irq: dev->pdev->irq, handler: dev->a_ops.adapter_intr, |
1018 | IRQF_SHARED, name: "aacraid" , dev: &(dev->aac_msix[0])) < 0) { |
1019 | |
1020 | if (dev->msi) |
1021 | pci_disable_msi(dev: dev->pdev); |
1022 | |
1023 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n" , |
1024 | name, instance); |
1025 | goto error_iounmap; |
1026 | } |
1027 | dev->dbg_base = pci_resource_start(dev->pdev, 2); |
1028 | dev->dbg_base_mapped = dev->regs.src.bar1; |
1029 | dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; |
1030 | dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; |
1031 | |
1032 | aac_adapter_enable_int(dev); |
1033 | |
1034 | if (!dev->sync_mode) { |
1035 | /* |
1036 | * Tell the adapter that all is configured, and it can |
1037 | * start accepting requests |
1038 | */ |
1039 | aac_src_start_adapter(dev); |
1040 | } |
1041 | return 0; |
1042 | |
1043 | error_iounmap: |
1044 | |
1045 | return -1; |
1046 | } |
1047 | |
1048 | static int aac_src_wait_sync(struct aac_dev *dev, int *status) |
1049 | { |
1050 | unsigned long start = jiffies; |
1051 | unsigned long usecs = 0; |
1052 | int delay = 5 * HZ; |
1053 | int rc = 1; |
1054 | |
1055 | while (time_before(jiffies, start+delay)) { |
1056 | /* |
1057 | * Delay 5 microseconds to let Mon960 get info. |
1058 | */ |
1059 | udelay(5); |
1060 | |
1061 | /* |
1062 | * Mon960 will set doorbell0 bit when it has completed the |
1063 | * command. |
1064 | */ |
1065 | if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { |
1066 | /* |
1067 | * Clear: the doorbell. |
1068 | */ |
1069 | if (dev->msi_enabled) |
1070 | aac_src_access_devreg(dev, mode: AAC_CLEAR_SYNC_BIT); |
1071 | else |
1072 | src_writel(dev, MUnit.ODR_C, |
1073 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); |
1074 | rc = 0; |
1075 | |
1076 | break; |
1077 | } |
1078 | |
1079 | /* |
1080 | * Yield the processor in case we are slow |
1081 | */ |
1082 | usecs = 1 * USEC_PER_MSEC; |
1083 | usleep_range(min: usecs, max: usecs + 50); |
1084 | } |
1085 | /* |
1086 | * Pull the synch status from Mailbox 0. |
1087 | */ |
1088 | if (status && !rc) { |
1089 | status[0] = readl(addr: &dev->IndexRegs->Mailbox[0]); |
1090 | status[1] = readl(addr: &dev->IndexRegs->Mailbox[1]); |
1091 | status[2] = readl(addr: &dev->IndexRegs->Mailbox[2]); |
1092 | status[3] = readl(addr: &dev->IndexRegs->Mailbox[3]); |
1093 | status[4] = readl(addr: &dev->IndexRegs->Mailbox[4]); |
1094 | } |
1095 | |
1096 | return rc; |
1097 | } |
1098 | |
1099 | /** |
1100 | * aac_src_soft_reset - perform soft reset to speed up |
1101 | * access |
1102 | * |
1103 | * Assumptions: That the controller is in a state where we can |
1104 | * bring it back to life with an init struct. We can only use |
1105 | * fast sync commands, as the timeout is 5 seconds. |
1106 | * |
1107 | * @dev: device to configure |
1108 | * |
1109 | */ |
1110 | |
1111 | static int aac_src_soft_reset(struct aac_dev *dev) |
1112 | { |
1113 | u32 status_omr = src_readl(dev, MUnit.OMR); |
1114 | u32 status[5]; |
1115 | int rc = 1; |
1116 | int state = 0; |
1117 | char *state_str[7] = { |
1118 | "GET_ADAPTER_PROPERTIES Failed" , |
1119 | "GET_ADAPTER_PROPERTIES timeout" , |
1120 | "SOFT_RESET not supported" , |
1121 | "DROP_IO Failed" , |
1122 | "DROP_IO timeout" , |
1123 | "Check Health failed" |
1124 | }; |
1125 | |
1126 | if (status_omr == INVALID_OMR) |
1127 | return 1; // pcie hosed |
1128 | |
1129 | if (!(status_omr & KERNEL_UP_AND_RUNNING)) |
1130 | return 1; // not up and running |
1131 | |
1132 | /* |
1133 | * We go into soft reset mode to allow us to handle response |
1134 | */ |
1135 | dev->in_soft_reset = 1; |
1136 | dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX; |
1137 | |
1138 | /* Get adapter properties */ |
1139 | rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, |
1140 | 0, 0, 0, status+0, status+1, status+2, status+3, status+4); |
1141 | if (rc) |
1142 | goto out; |
1143 | |
1144 | state++; |
1145 | if (aac_src_wait_sync(dev, status)) { |
1146 | rc = 1; |
1147 | goto out; |
1148 | } |
1149 | |
1150 | state++; |
1151 | if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) && |
1152 | (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) { |
1153 | rc = 2; |
1154 | goto out; |
1155 | } |
1156 | |
1157 | if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && |
1158 | (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) |
1159 | dev->sa_firmware = 1; |
1160 | |
1161 | state++; |
1162 | rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0, |
1163 | status+0, status+1, status+2, status+3, status+4); |
1164 | |
1165 | if (rc) |
1166 | goto out; |
1167 | |
1168 | state++; |
1169 | if (aac_src_wait_sync(dev, status)) { |
1170 | rc = 3; |
1171 | goto out; |
1172 | } |
1173 | |
1174 | if (status[1]) |
1175 | dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n" , |
1176 | __func__, status[1]); |
1177 | |
1178 | state++; |
1179 | rc = aac_src_check_health(dev); |
1180 | |
1181 | out: |
1182 | dev->in_soft_reset = 0; |
1183 | dev->msi_enabled = 0; |
1184 | if (rc) |
1185 | dev_err(&dev->pdev->dev, "%s: %s status = %d" , __func__, |
1186 | state_str[state], rc); |
1187 | |
1188 | return rc; |
1189 | } |
1190 | /** |
1191 | * aac_srcv_init - initialize an SRCv card |
1192 | * @dev: device to configure |
1193 | * |
1194 | */ |
1195 | |
1196 | int aac_srcv_init(struct aac_dev *dev) |
1197 | { |
1198 | unsigned long start; |
1199 | unsigned long status; |
1200 | int restart = 0; |
1201 | int instance = dev->id; |
1202 | const char *name = dev->name; |
1203 | |
1204 | dev->a_ops.adapter_ioremap = aac_srcv_ioremap; |
1205 | dev->a_ops.adapter_comm = aac_src_select_comm; |
1206 | |
1207 | dev->base_size = AAC_MIN_SRCV_BAR0_SIZE; |
1208 | if (aac_adapter_ioremap(dev, dev->base_size)) { |
1209 | printk(KERN_WARNING "%s: unable to map adapter.\n" , name); |
1210 | goto error_iounmap; |
1211 | } |
1212 | |
1213 | /* Failure to reset here is an option ... */ |
1214 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; |
1215 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; |
1216 | |
1217 | if (dev->init_reset) { |
1218 | dev->init_reset = false; |
1219 | if (aac_src_soft_reset(dev)) { |
1220 | aac_src_restart_adapter(dev, bled: 0, IOP_HWSOFT_RESET); |
1221 | ++restart; |
1222 | } |
1223 | } |
1224 | |
1225 | /* |
1226 | * Check to see if flash update is running. |
1227 | * Wait for the adapter to be up and running. Wait up to 5 minutes |
1228 | */ |
1229 | status = src_readl(dev, MUnit.OMR); |
1230 | if (status & FLASH_UPD_PENDING) { |
1231 | start = jiffies; |
1232 | do { |
1233 | status = src_readl(dev, MUnit.OMR); |
1234 | if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) { |
1235 | printk(KERN_ERR "%s%d: adapter flash update failed.\n" , |
1236 | dev->name, instance); |
1237 | goto error_iounmap; |
1238 | } |
1239 | } while (!(status & FLASH_UPD_SUCCESS) && |
1240 | !(status & FLASH_UPD_FAILED)); |
1241 | /* Delay 10 seconds. |
1242 | * Because right now FW is doing a soft reset, |
1243 | * do not read scratch pad register at this time |
1244 | */ |
1245 | ssleep(seconds: 10); |
1246 | } |
1247 | /* |
1248 | * Check to see if the board panic'd while booting. |
1249 | */ |
1250 | status = src_readl(dev, MUnit.OMR); |
1251 | if (status & KERNEL_PANIC) { |
1252 | if (aac_src_restart_adapter(dev, |
1253 | bled: aac_src_check_health(dev), IOP_HWSOFT_RESET)) |
1254 | goto error_iounmap; |
1255 | ++restart; |
1256 | } |
1257 | /* |
1258 | * Check to see if the board failed any self tests. |
1259 | */ |
1260 | status = src_readl(dev, MUnit.OMR); |
1261 | if (status & SELF_TEST_FAILED) { |
1262 | printk(KERN_ERR "%s%d: adapter self-test failed.\n" , dev->name, instance); |
1263 | goto error_iounmap; |
1264 | } |
1265 | /* |
1266 | * Check to see if the monitor panic'd while booting. |
1267 | */ |
1268 | if (status & MONITOR_PANIC) { |
1269 | printk(KERN_ERR "%s%d: adapter monitor panic.\n" , dev->name, instance); |
1270 | goto error_iounmap; |
1271 | } |
1272 | |
1273 | start = jiffies; |
1274 | /* |
1275 | * Wait for the adapter to be up and running. Wait up to 3 minutes |
1276 | */ |
1277 | do { |
1278 | status = src_readl(dev, MUnit.OMR); |
1279 | if (status == INVALID_OMR) |
1280 | status = 0; |
1281 | |
1282 | if ((restart && |
1283 | (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || |
1284 | time_after(jiffies, start+HZ*startup_timeout)) { |
1285 | printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n" , |
1286 | dev->name, instance, status); |
1287 | goto error_iounmap; |
1288 | } |
1289 | if (!restart && |
1290 | ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || |
1291 | time_after(jiffies, start + HZ * |
1292 | ((startup_timeout > 60) |
1293 | ? (startup_timeout - 60) |
1294 | : (startup_timeout / 2))))) { |
1295 | if (likely(!aac_src_restart_adapter(dev, |
1296 | aac_src_check_health(dev), IOP_HWSOFT_RESET))) |
1297 | start = jiffies; |
1298 | ++restart; |
1299 | } |
1300 | msleep(msecs: 1); |
1301 | } while (!(status & KERNEL_UP_AND_RUNNING)); |
1302 | |
1303 | if (restart && aac_commit) |
1304 | aac_commit = 1; |
1305 | /* |
1306 | * Fill in the common function dispatch table. |
1307 | */ |
1308 | dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; |
1309 | dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; |
1310 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; |
1311 | dev->a_ops.adapter_notify = aac_src_notify_adapter; |
1312 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; |
1313 | dev->a_ops.adapter_check_health = aac_src_check_health; |
1314 | dev->a_ops.adapter_restart = aac_src_restart_adapter; |
1315 | dev->a_ops.adapter_start = aac_src_start_adapter; |
1316 | |
1317 | /* |
1318 | * First clear out all interrupts. Then enable the one's that we |
1319 | * can handle. |
1320 | */ |
1321 | aac_adapter_comm(dev, AAC_COMM_MESSAGE); |
1322 | aac_adapter_disable_int(dev); |
1323 | src_writel(dev, MUnit.ODR_C, 0xffffffff); |
1324 | aac_adapter_enable_int(dev); |
1325 | |
1326 | if (aac_init_adapter(dev) == NULL) |
1327 | goto error_iounmap; |
1328 | if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) && |
1329 | (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)) |
1330 | goto error_iounmap; |
1331 | if (dev->msi_enabled) |
1332 | aac_src_access_devreg(dev, mode: AAC_ENABLE_MSIX); |
1333 | |
1334 | if (aac_acquire_irq(dev)) |
1335 | goto error_iounmap; |
1336 | |
1337 | dev->dbg_base = pci_resource_start(dev->pdev, 2); |
1338 | dev->dbg_base_mapped = dev->regs.src.bar1; |
1339 | dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE; |
1340 | dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; |
1341 | |
1342 | aac_adapter_enable_int(dev); |
1343 | |
1344 | if (!dev->sync_mode) { |
1345 | /* |
1346 | * Tell the adapter that all is configured, and it can |
1347 | * start accepting requests |
1348 | */ |
1349 | aac_src_start_adapter(dev); |
1350 | } |
1351 | return 0; |
1352 | |
1353 | error_iounmap: |
1354 | |
1355 | return -1; |
1356 | } |
1357 | |
1358 | void aac_src_access_devreg(struct aac_dev *dev, int mode) |
1359 | { |
1360 | u_int32_t val; |
1361 | |
1362 | switch (mode) { |
1363 | case AAC_ENABLE_INTERRUPT: |
1364 | src_writel(dev, |
1365 | MUnit.OIMR, |
1366 | dev->OIMR = (dev->msi_enabled ? |
1367 | AAC_INT_ENABLE_TYPE1_MSIX : |
1368 | AAC_INT_ENABLE_TYPE1_INTX)); |
1369 | break; |
1370 | |
1371 | case AAC_DISABLE_INTERRUPT: |
1372 | src_writel(dev, |
1373 | MUnit.OIMR, |
1374 | dev->OIMR = AAC_INT_DISABLE_ALL); |
1375 | break; |
1376 | |
1377 | case AAC_ENABLE_MSIX: |
1378 | /* set bit 6 */ |
1379 | val = src_readl(dev, MUnit.IDR); |
1380 | val |= 0x40; |
1381 | src_writel(dev, MUnit.IDR, val); |
1382 | src_readl(dev, MUnit.IDR); |
1383 | /* unmask int. */ |
1384 | val = PMC_ALL_INTERRUPT_BITS; |
1385 | src_writel(dev, MUnit.IOAR, val); |
1386 | val = src_readl(dev, MUnit.OIMR); |
1387 | src_writel(dev, |
1388 | MUnit.OIMR, |
1389 | val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); |
1390 | break; |
1391 | |
1392 | case AAC_DISABLE_MSIX: |
1393 | /* reset bit 6 */ |
1394 | val = src_readl(dev, MUnit.IDR); |
1395 | val &= ~0x40; |
1396 | src_writel(dev, MUnit.IDR, val); |
1397 | src_readl(dev, MUnit.IDR); |
1398 | break; |
1399 | |
1400 | case AAC_CLEAR_AIF_BIT: |
1401 | /* set bit 5 */ |
1402 | val = src_readl(dev, MUnit.IDR); |
1403 | val |= 0x20; |
1404 | src_writel(dev, MUnit.IDR, val); |
1405 | src_readl(dev, MUnit.IDR); |
1406 | break; |
1407 | |
1408 | case AAC_CLEAR_SYNC_BIT: |
1409 | /* set bit 4 */ |
1410 | val = src_readl(dev, MUnit.IDR); |
1411 | val |= 0x10; |
1412 | src_writel(dev, MUnit.IDR, val); |
1413 | src_readl(dev, MUnit.IDR); |
1414 | break; |
1415 | |
1416 | case AAC_ENABLE_INTX: |
1417 | /* set bit 7 */ |
1418 | val = src_readl(dev, MUnit.IDR); |
1419 | val |= 0x80; |
1420 | src_writel(dev, MUnit.IDR, val); |
1421 | src_readl(dev, MUnit.IDR); |
1422 | /* unmask int. */ |
1423 | val = PMC_ALL_INTERRUPT_BITS; |
1424 | src_writel(dev, MUnit.IOAR, val); |
1425 | src_readl(dev, MUnit.IOAR); |
1426 | val = src_readl(dev, MUnit.OIMR); |
1427 | src_writel(dev, MUnit.OIMR, |
1428 | val & (~(PMC_GLOBAL_INT_BIT2))); |
1429 | break; |
1430 | |
1431 | default: |
1432 | break; |
1433 | } |
1434 | } |
1435 | |
1436 | static int aac_src_get_sync_status(struct aac_dev *dev) |
1437 | { |
1438 | int msix_val = 0; |
1439 | int legacy_val = 0; |
1440 | |
1441 | msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0; |
1442 | |
1443 | if (!dev->msi_enabled) { |
1444 | /* |
1445 | * if Legacy int status indicates cmd is not complete |
1446 | * sample MSIx register to see if it indiactes cmd complete, |
1447 | * if yes set the controller in MSIx mode and consider cmd |
1448 | * completed |
1449 | */ |
1450 | legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT; |
1451 | if (!(legacy_val & 1) && msix_val) |
1452 | dev->msi_enabled = 1; |
1453 | return legacy_val; |
1454 | } |
1455 | |
1456 | return msix_val; |
1457 | } |
1458 | |