1/************************************************************************
2 * Linux driver for *
3 * ICP vortex GmbH: GDT PCI Disk Array Controllers *
4 * Intel Corporation: Storage RAID Controllers *
5 * *
6 * gdth.c *
7 * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
8 * Copyright (C) 2002-04 Intel Corporation *
9 * Copyright (C) 2003-06 Adaptec Inc. *
10 * <achim_leubner@adaptec.com> *
11 * *
12 * Additions/Fixes: *
13 * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
14 * Johannes Dinner <johannes_dinner@adaptec.com> *
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published *
18 * by the Free Software Foundation; either version 2 of the License, *
19 * or (at your option) any later version. *
20 * *
21 * This program is distributed in the hope that it will be useful, *
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
24 * GNU General Public License for more details. *
25 * *
26 * You should have received a copy of the GNU General Public License *
27 * along with this kernel; if not, write to the Free Software *
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
29 * *
30 * Linux kernel 2.6.x supported *
31 * *
32 ************************************************************************/
33
34/* All GDT Disk Array Controllers are fully supported by this driver.
35 * This includes the PCI SCSI Disk Array Controllers and the
36 * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
37 * list of all controller types.
38 *
39 * After the optional list of IRQ values, other possible
40 * command line options are:
41 * disable:Y disable driver
42 * disable:N enable driver
43 * reserve_mode:0 reserve no drives for the raw service
44 * reserve_mode:1 reserve all not init., removable drives
45 * reserve_mode:2 reserve all not init. drives
46 * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
47 * h- controller no., b- channel no.,
48 * t- target ID, l- LUN
49 * reverse_scan:Y reverse scan order for PCI controllers
50 * reverse_scan:N scan PCI controllers like BIOS
51 * max_ids:x x - target ID count per channel (1..MAXID)
52 * rescan:Y rescan all channels/IDs
53 * rescan:N use all devices found until now
54 * hdr_channel:x x - number of virtual bus for host drives
55 * shared_access:Y disable driver reserve/release protocol to
56 * access a shared resource from several nodes,
57 * appropriate controller firmware required
58 * shared_access:N enable driver reserve/release protocol
59 * force_dma32:Y use only 32 bit DMA mode
60 * force_dma32:N use 64 bit DMA mode, if supported
61 *
62 * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
63 * max_ids:127,rescan:N,hdr_channel:0,
64 * shared_access:Y,force_dma32:N".
65 * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
66 *
67 * When loading the gdth driver as a module, the same options are available.
68 * You can set the IRQs with "IRQ=...". However, the syntax to specify the
69 * options changes slightly. You must replace all ',' between options
70 * with ' ' and all ':' with '=' and you must use
71 * '1' in place of 'Y' and '0' in place of 'N'.
72 *
 * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
 *          max_ids=127 rescan=0 hdr_channel=0 shared_access=1
 *          force_dma32=0"
76 * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
77 */
78
79/* The meaning of the Scsi_Pointer members in this driver is as follows:
80 * ptr: Chaining
81 * this_residual: unused
82 * buffer: unused
83 * dma_handle: unused
84 * buffers_residual: unused
85 * Status: unused
86 * Message: unused
87 * have_data_in: unused
88 * sent_command: unused
89 * phase: unused
90 */
91
92/* statistics */
93#define GDTH_STATISTICS
94
95#include <linux/module.h>
96
97#include <linux/version.h>
98#include <linux/kernel.h>
99#include <linux/types.h>
100#include <linux/pci.h>
101#include <linux/string.h>
102#include <linux/ctype.h>
103#include <linux/ioport.h>
104#include <linux/delay.h>
105#include <linux/interrupt.h>
106#include <linux/in.h>
107#include <linux/proc_fs.h>
108#include <linux/time.h>
109#include <linux/timer.h>
110#include <linux/dma-mapping.h>
111#include <linux/list.h>
112#include <linux/mutex.h>
113#include <linux/slab.h>
114#include <linux/reboot.h>
115
116#include <asm/dma.h>
117#include <asm/io.h>
118#include <linux/uaccess.h>
119#include <linux/spinlock.h>
120#include <linux/blkdev.h>
121#include <linux/scatterlist.h>
122
123#include "scsi.h"
124#include <scsi/scsi_host.h>
125#include "gdth.h"
126
127static DEFINE_MUTEX(gdth_mutex);
128static void gdth_delay(int milliseconds);
129static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
130static irqreturn_t gdth_interrupt(int irq, void *dev_id);
131static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
132 int gdth_from_wait, int* pIndex);
133static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
134 struct scsi_cmnd *scp);
135static int gdth_async_event(gdth_ha_str *ha);
136static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
137
138static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
139static void gdth_next(gdth_ha_str *ha);
140static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
141static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
142static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
143 u16 idx, gdth_evt_data *evt);
144static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
145static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
146 gdth_evt_str *estr);
147static void gdth_clear_events(void);
148
149static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
150 char *buffer, u16 count);
151static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
152static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
153 u16 hdrive);
154
155static void gdth_enable_int(gdth_ha_str *ha);
156static int gdth_test_busy(gdth_ha_str *ha);
157static int gdth_get_cmd_index(gdth_ha_str *ha);
158static void gdth_release_event(gdth_ha_str *ha);
159static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
160static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
161 u32 p1, u64 p2,u64 p3);
162static int gdth_search_drives(gdth_ha_str *ha);
163static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
164
165static const char *gdth_ctr_name(gdth_ha_str *ha);
166
167static int gdth_open(struct inode *inode, struct file *filep);
168static int gdth_close(struct inode *inode, struct file *filep);
169static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
170 unsigned long arg);
171
172static void gdth_flush(gdth_ha_str *ha);
173static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
174static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
175 struct gdth_cmndinfo *cmndinfo);
176static void gdth_scsi_done(struct scsi_cmnd *scp);
177
178#ifdef DEBUG_GDTH
179static u8 DebugState = DEBUG_GDTH;
180#define TRACE(a) {if (DebugState==1) {printk a;}}
181#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
182#define TRACE3(a) {if (DebugState!=0) {printk a;}}
183#else /* !DEBUG */
184#define TRACE(a)
185#define TRACE2(a)
186#define TRACE3(a)
187#endif
188
189#ifdef GDTH_STATISTICS
190static u32 max_rq=0, max_index=0, max_sg=0;
191static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
192static struct timer_list gdth_timer;
193#endif
194
195#define PTR2USHORT(a) (u16)(unsigned long)(a)
196#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
197#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
198
199#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
200
201static u8 gdth_polling; /* polling if TRUE */
202static int gdth_ctr_count = 0; /* controller count */
203static LIST_HEAD(gdth_instances); /* controller list */
204static u8 gdth_write_through = FALSE; /* write through */
205static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
206static int elastidx;
207static int eoldidx;
208static int major;
209
210#define DIN 1 /* IN data direction */
211#define DOU 2 /* OUT data direction */
212#define DNO DIN /* no data transfer */
213#define DUN DIN /* unknown data direction */
214static u8 gdth_direction_tab[0x100] = {
215 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
216 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
217 DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
218 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
219 DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
220 DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
221 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
222 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
223 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
224 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
225 DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
226 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
227 DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
228 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
229 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
230 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
231};
232
233/* LILO and modprobe/insmod parameters */
234/* disable driver flag */
235static int disable __initdata = 0;
236/* reserve flag */
237static int reserve_mode = 1;
238/* reserve list */
239static int reserve_list[MAX_RES_ARGS] =
240{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
241 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
242 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
243/* scan order for PCI controllers */
244static int reverse_scan = 0;
245/* virtual channel for the host drives */
246static int hdr_channel = 0;
247/* max. IDs per channel */
248static int max_ids = MAXID;
249/* rescan all IDs */
250static int rescan = 0;
251/* shared access */
252static int shared_access = 1;
253/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
254static int force_dma32 = 0;
255
256/* parameters for modprobe/insmod */
257module_param(disable, int, 0);
258module_param(reserve_mode, int, 0);
259module_param_array(reserve_list, int, NULL, 0);
260module_param(reverse_scan, int, 0);
261module_param(hdr_channel, int, 0);
262module_param(max_ids, int, 0);
263module_param(rescan, int, 0);
264module_param(shared_access, int, 0);
265module_param(force_dma32, int, 0);
266MODULE_AUTHOR("Achim Leubner");
267MODULE_LICENSE("GPL");
268
269/* ioctl interface */
270static const struct file_operations gdth_fops = {
271 .unlocked_ioctl = gdth_unlocked_ioctl,
272 .open = gdth_open,
273 .release = gdth_close,
274 .llseek = noop_llseek,
275};
276
277#include "gdth_proc.h"
278#include "gdth_proc.c"
279
280static gdth_ha_str *gdth_find_ha(int hanum)
281{
282 gdth_ha_str *ha;
283
284 list_for_each_entry(ha, &gdth_instances, list)
285 if (hanum == ha->hanum)
286 return ha;
287
288 return NULL;
289}
290
291static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
292{
293 struct gdth_cmndinfo *priv = NULL;
294 unsigned long flags;
295 int i;
296
297 spin_lock_irqsave(&ha->smp_lock, flags);
298
299 for (i=0; i<GDTH_MAXCMDS; ++i) {
300 if (ha->cmndinfo[i].index == 0) {
301 priv = &ha->cmndinfo[i];
302 memset(priv, 0, sizeof(*priv));
303 priv->index = i+1;
304 break;
305 }
306 }
307
308 spin_unlock_irqrestore(&ha->smp_lock, flags);
309
310 return priv;
311}
312
313static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
314{
315 BUG_ON(!priv);
316 priv->index = 0;
317}
318
/* Busy-wait for the given number of milliseconds; a zero argument
 * still inserts a minimal 1 microsecond delay. */
static void gdth_delay(int milliseconds)
{
    if (milliseconds)
        mdelay(milliseconds);
    else
        udelay(1);
}
327
328static void gdth_scsi_done(struct scsi_cmnd *scp)
329{
330 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
331 int internal_command = cmndinfo->internal_command;
332
333 TRACE2(("gdth_scsi_done()\n"));
334
335 gdth_put_cmndinfo(cmndinfo);
336 scp->host_scribble = NULL;
337
338 if (internal_command)
339 complete((struct completion *)scp->request);
340 else
341 scp->scsi_done(scp);
342}
343
/* Execute an internal controller command synchronously on @sdev.
 *
 * A scsi_cmnd is fabricated on the heap (it never goes through the
 * midlayer), queued via __gdth_queuecommand() with IOCTL priority, and
 * this thread sleeps on a completion until gdth_scsi_done() fires.
 *
 * @gdtcmd:  controller command block to send
 * @cmnd:    12-byte CDB buffer
 * @timeout: NOTE(review): currently unused -- the wait below is
 *           unbounded; confirm whether a bounded wait was intended.
 * @info:    optional out-parameter for the controller's info word
 *
 * Returns the command status from the controller, or -ENOMEM.
 */
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
                   int timeout, u32 *info)
{
    gdth_ha_str *ha = shost_priv(sdev->host);
    struct scsi_cmnd *scp;
    struct gdth_cmndinfo cmndinfo;
    DECLARE_COMPLETION_ONSTACK(wait);
    int rval;

    scp = kzalloc(sizeof(*scp), GFP_KERNEL);
    if (!scp)
        return -ENOMEM;

    scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
    if (!scp->sense_buffer) {
        kfree(scp);
        return -ENOMEM;
    }

    scp->device = sdev;
    memset(&cmndinfo, 0, sizeof(cmndinfo));

    /* use request field to save the ptr. to completion struct.
     * gdth_scsi_done() completes it for internal commands */
    scp->request = (struct request *)&wait;
    scp->cmd_len = 12;
    scp->cmnd = cmnd;
    cmndinfo.priority = IOCTL_PRI;
    cmndinfo.internal_cmd_str = gdtcmd;
    cmndinfo.internal_command = 1;

    TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
    __gdth_queuecommand(ha, scp, &cmndinfo);

    wait_for_completion(&wait);

    rval = cmndinfo.status;
    if (info)
        *info = cmndinfo.info;
    kfree(scp->sense_buffer);
    kfree(scp);
    return rval;
}
386
387int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
388 int timeout, u32 *info)
389{
390 struct scsi_device *sdev = scsi_get_host_dev(shost);
391 int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
392
393 scsi_free_host_dev(sdev);
394 return rval;
395}
396
397static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
398{
399 *cyls = size /HEADS/SECS;
400 if (*cyls <= MAXCYLS) {
401 *heads = HEADS;
402 *secs = SECS;
403 } else { /* too high for 64*32 */
404 *cyls = size /MEDHEADS/MEDSECS;
405 if (*cyls <= MAXCYLS) {
406 *heads = MEDHEADS;
407 *secs = MEDSECS;
408 } else { /* too high for 127*63 */
409 *cyls = size /BIGHEADS/BIGSECS;
410 *heads = BIGHEADS;
411 *secs = BIGSECS;
412 }
413 }
414}
415
416static bool gdth_search_vortex(u16 device)
417{
418 if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
419 return true;
420 if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
421 device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
422 return true;
423 if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
424 device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
425 return true;
426 return false;
427}
428
429static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
430static int gdth_pci_init_one(struct pci_dev *pdev,
431 const struct pci_device_id *ent);
432static void gdth_pci_remove_one(struct pci_dev *pdev);
433static void gdth_remove_one(gdth_ha_str *ha);
434
/* Vortex only makes RAID controllers.
 * We do not really want to specify all 550 ids here, so wildcard match.
 * Non-RAID Vortex ids are rejected later in gdth_pci_init_one() via
 * gdth_search_vortex().
 */
static const struct pci_device_id gdthtable[] = {
    { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
    { } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, gdthtable);
445
/* PCI driver glue: binds the gdthtable ids to probe/remove callbacks. */
static struct pci_driver gdth_pci_driver = {
    .name = "gdth",
    .id_table = gdthtable,
    .probe = gdth_pci_init_one,
    .remove = gdth_pci_remove_one,
};
452
/* PCI hot-remove callback: unregister the controller instance and
 * release the PCI device.
 * NOTE(review): ha->list is removed without holding a lock here --
 * presumably driver remove is already serialized against list
 * readers; confirm before adding new users of gdth_instances. */
static void gdth_pci_remove_one(struct pci_dev *pdev)
{
    gdth_ha_str *ha = pci_get_drvdata(pdev);

    /* drop the adapter from the global controller list */
    list_del(&ha->list);
    gdth_remove_one(ha);

    pci_disable_device(pdev);
}
462
463static int gdth_pci_init_one(struct pci_dev *pdev,
464 const struct pci_device_id *ent)
465{
466 u16 vendor = pdev->vendor;
467 u16 device = pdev->device;
468 unsigned long base0, base1, base2;
469 int rc;
470 gdth_pci_str gdth_pcistr;
471 gdth_ha_str *ha = NULL;
472
473 TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
474 gdth_ctr_count, vendor, device));
475
476 memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
477
478 if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
479 return -ENODEV;
480
481 rc = pci_enable_device(pdev);
482 if (rc)
483 return rc;
484
485 if (gdth_ctr_count >= MAXHA)
486 return -EBUSY;
487
488 /* GDT PCI controller found, resources are already in pdev */
489 gdth_pcistr.pdev = pdev;
490 base0 = pci_resource_flags(pdev, 0);
491 base1 = pci_resource_flags(pdev, 1);
492 base2 = pci_resource_flags(pdev, 2);
493 if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
494 device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
495 if (!(base0 & IORESOURCE_MEM))
496 return -ENODEV;
497 gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
498 } else { /* GDT6110, GDT6120, .. */
499 if (!(base0 & IORESOURCE_MEM) ||
500 !(base2 & IORESOURCE_MEM) ||
501 !(base1 & IORESOURCE_IO))
502 return -ENODEV;
503 gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
504 gdth_pcistr.io = pci_resource_start(pdev, 1);
505 }
506 TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
507 gdth_pcistr.pdev->bus->number,
508 PCI_SLOT(gdth_pcistr.pdev->devfn),
509 gdth_pcistr.irq,
510 gdth_pcistr.dpmem));
511
512 rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
513 if (rc)
514 return rc;
515
516 return 0;
517}
518
/* Map and initialize one GDT PCI controller's dual-ported memory
 * (DPMEM) interface.  Three hardware generations are handled:
 *   - GDT6000/B  (ha->type = GDT_PCI):    gdt6_dpram_str layout
 *   - GDT6110..  (ha->type = GDT_PCINEW): gdt6c_dpram_str + PLX bridge
 *   - MPR boards (ha->type = GDT_PCIMPR): gdt6m_dpram_str + i960
 * Each branch follows the same pattern: map DPMEM, verify it is
 * writable with DPMEM_MAGIC (relocating into the 0xC8000-0xE8000
 * window if shadowed), clear the interface area, disable board
 * interrupts, then run the firmware handshake (write S_Cmd_Indx, ring
 * the doorbell, poll S_Status until it echoes the index).
 *
 * Returns 1 on success, 0 on failure (caller treats this as boolean).
 */
static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
                         gdth_ha_str *ha)
{
    register gdt6_dpram_str __iomem *dp6_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    u32 retries;
    u8 prot_ver;
    u16 command;
    int i, found = FALSE;

    TRACE(("gdth_init_pci()\n"));

    /* identify the OEM from the PCI vendor id */
    if (pdev->vendor == PCI_VENDOR_ID_INTEL)
        ha->oem_id = OEM_ID_INTEL;
    else
        ha->oem_id = OEM_ID_ICP;
    ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
    ha->stype = (u32)pdev->device;
    ha->irq = pdev->irq;
    ha->pdev = pdev;

    if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
        TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }
        /* check and reset interface area */
        dp6_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6_ptr->u);
        if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
            /* DPMEM not usable at the BAR address (e.g. shadowed by
             * the BIOS): probe the legacy window for a free slot and
             * rebase the BAR there */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_old() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6_ptr->u);
                if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        /* clear the whole interface area and verify it took */
        memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
        if (readl(&dp6_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        writeb(0xff, &dp6_ptr->io.irqdel);
        writeb(0x00, &dp6_ptr->io.irqen);
        writeb(0x00, &dp6_ptr->u.ic.S_Status);
        writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);

        /* handshake: pass the DPMEM address, request DEINIT (0xff) */
        writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCI;
        ha->ic_all_size = sizeof(dp6_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);

        ha->dma64_support = 0;

    } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
        /* PLX bridge registers live in I/O space (BAR 1) */
        ha->plx = (gdt6c_plx_regs *)pcistr->io;
        TRACE2(("init_pci_new() dpmem %lx irq %d\n",
                pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            /* NOTE(review): ha->brd is NULL here, so this iounmap()
             * is a no-op at best -- the other branches simply return;
             * confirm whether it can be dropped */
            iounmap(ha->brd);
            return 0;
        }
        /* check and reset interface area */
        dp6c_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6c_ptr->u);
        if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
            /* shadowed DPMEM: relocate into the legacy window, as in
             * the GDT6000/B branch but via BAR 2 */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_plx() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
                ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6c_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6c_ptr->u);
                if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
        if (readl(&dp6c_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        outb(0x00,PTR2USHORT(&ha->plx->control1));
        outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));

        writeb(0x00, &dp6c_ptr->u.ic.S_Status);
        writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);

        /* ring the local doorbell to start the DEINIT handshake */
        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
        /* NOTE(review): every other handshake in this function clears
         * u.ic.S_Status here; this one writes u.ic.Status -- confirm
         * against gdth.h whether that is intentional */
        writeb(0, &dp6c_ptr->u.ic.Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCINEW;
        ha->ic_all_size = sizeof(dp6c_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);

        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6c_ptr->u.ic.S_Status);

        ha->dma64_support = 0;

    } else { /* MPR */
        TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }

        /* manipulate config. space to enable DPMEM, start RP controller */
        pci_read_config_word(pdev, PCI_COMMAND, &command);
        command |= 6;   /* set PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER */
        pci_write_config_word(pdev, PCI_COMMAND, command);
        gdth_delay(1);

        dp6m_ptr = ha->brd;

        /* Ensure that it is safe to access the non HW portions of DPMEM.
         * Additional check needed for Xscale based RAID controllers */
        while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
            gdth_delay(1);

        /* check and reset interface area */
        writel(DPMEM_MAGIC, &dp6m_ptr->u);
        if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
            /* shadowed DPMEM: relocate into the legacy window */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6m_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6m_ptr->u);
                if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));

        /* disable board interrupts, deinit services */
        writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
                    &dp6m_ptr->i960r.edoor_en_reg);
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        writeb(0x00, &dp6m_ptr->u.ic.S_Status);
        writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCIMPR;
        ha->ic_all_size = sizeof(dp6m_ptr->u);

        /* special command to controller BIOS */
        writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6m_ptr->u.ic.S_Status);

        /* read FW version to detect 64-bit DMA support */
        writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver < 0x2b)    /* FW < x.43: no 64-bit DMA support */
            ha->dma64_support = 0;
        else
            ha->dma64_support = 1;
    }

    return 1;
}
893
894/* controller protocol functions */
895
896static void gdth_enable_int(gdth_ha_str *ha)
897{
898 unsigned long flags;
899 gdt6_dpram_str __iomem *dp6_ptr;
900 gdt6m_dpram_str __iomem *dp6m_ptr;
901
902 TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
903 spin_lock_irqsave(&ha->smp_lock, flags);
904
905 if (ha->type == GDT_PCI) {
906 dp6_ptr = ha->brd;
907 writeb(1, &dp6_ptr->io.irqdel);
908 writeb(0, &dp6_ptr->u.ic.Cmd_Index);
909 writeb(1, &dp6_ptr->io.irqen);
910 } else if (ha->type == GDT_PCINEW) {
911 outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
912 outb(0x03, PTR2USHORT(&ha->plx->control1));
913 } else if (ha->type == GDT_PCIMPR) {
914 dp6m_ptr = ha->brd;
915 writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
916 writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
917 &dp6m_ptr->i960r.edoor_en_reg);
918 }
919 spin_unlock_irqrestore(&ha->smp_lock, flags);
920}
921
922/* return IStatus if interrupt was from this card else 0 */
923static u8 gdth_get_status(gdth_ha_str *ha)
924{
925 u8 IStatus = 0;
926
927 TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
928
929 if (ha->type == GDT_PCI)
930 IStatus =
931 readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
932 else if (ha->type == GDT_PCINEW)
933 IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
934 else if (ha->type == GDT_PCIMPR)
935 IStatus =
936 readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
937
938 return IStatus;
939}
940
941static int gdth_test_busy(gdth_ha_str *ha)
942{
943 register int gdtsema0 = 0;
944
945 TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
946
947 if (ha->type == GDT_PCI)
948 gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
949 else if (ha->type == GDT_PCINEW)
950 gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
951 else if (ha->type == GDT_PCIMPR)
952 gdtsema0 =
953 (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
954
955 return (gdtsema0 & 1);
956}
957
958
959static int gdth_get_cmd_index(gdth_ha_str *ha)
960{
961 int i;
962
963 TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
964
965 for (i=0; i<GDTH_MAXCMDS; ++i) {
966 if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
967 ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
968 ha->cmd_tab[i].service = ha->pccb->Service;
969 ha->pccb->CommandIndex = (u32)i+2;
970 return (i+2);
971 }
972 }
973 return 0;
974}
975
976
977static void gdth_set_sema0(gdth_ha_str *ha)
978{
979 TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
980
981 if (ha->type == GDT_PCI) {
982 writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
983 } else if (ha->type == GDT_PCINEW) {
984 outb(1, PTR2USHORT(&ha->plx->sema0_reg));
985 } else if (ha->type == GDT_PCIMPR) {
986 writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
987 }
988}
989
990
/* Copy the staged command (ha->pccb) into the controller's DPMEM
 * command area: record the command's DPMEM offset and service id in
 * the communication queue slot for this command number, then copy the
 * command body itself.  The copy length is rounded up to a dword
 * boundary and ha->cmd_offs_dpmem is advanced past it. */
static void gdth_copy_command(gdth_ha_str *ha)
{
    register gdth_cmd_str *cmd_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    u16 cp_count,dp_offset,cmd_no;

    TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));

    cp_count = ha->cmd_len;
    dp_offset= ha->cmd_offs_dpmem;
    cmd_no = ha->cmd_cnt;
    cmd_ptr = ha->pccb;

    ++ha->cmd_cnt;

    /* set cpcount dword aligned */
    if (cp_count & 3)
        cp_count += (4 - (cp_count & 3));

    /* reserve the (aligned) space in DPMEM for this command */
    ha->cmd_offs_dpmem += cp_count;

    /* set offset and service, copy command to DPMEM */
    if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCINEW) {
        dp6c_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    }
}
1038
1039
/* Notify the controller that the command(s) copied to DPMEM are ready:
 * ring the board's local doorbell / event register.
 * NOTE(review): GDT_PCI writes 0 to io.event while the newer boards
 * write 1 to their ldoor registers — presumably the 6000-series event
 * register is triggered by the write itself; confirm against firmware
 * docs before changing.
 */
static void gdth_release_event(gdth_ha_str *ha)
{
    TRACE(("gdth_release_event() hanum %d\n", ha->hanum));

#ifdef GDTH_STATISTICS
    {
        u32 i,j;
        /* track the high-water mark of simultaneously used command slots */
        for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
            if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                ++i;
        }
        if (max_index < i) {
            max_index = i;
            TRACE3(("GDT: max_index = %d\n",(u16)i));
        }
    }
#endif

    /* for GDT_INIT the high bit of the service id is set as a marker */
    if (ha->pccb->OpCode == GDT_INIT)
        ha->pccb->Service |= 0x80;

    if (ha->type == GDT_PCI) {
        writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
    } else if (ha->type == GDT_PCIMPR) {
        writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
    }
}
1069
1070static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
1071{
1072 int answer_found = FALSE;
1073 int wait_index = 0;
1074
1075 TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
1076
1077 if (index == 0)
1078 return 1; /* no wait required */
1079
1080 do {
1081 __gdth_interrupt(ha, true, &wait_index);
1082 if (wait_index == index) {
1083 answer_found = TRUE;
1084 break;
1085 }
1086 gdth_delay(1);
1087 } while (--time);
1088
1089 while (gdth_test_busy(ha))
1090 gdth_delay(0);
1091
1092 return (answer_found);
1093}
1094
1095
/* Issue a driver-internal command to a controller service and poll for
 * its completion (no interrupts required; used during initialization).
 * @service: target service (CACHESERVICE, SCSIRAWSERVICE, SCREENSERVICE)
 * @opcode:  service-specific opcode; p1..p3 are opcode-dependent
 *           parameters (see the per-service branches below)
 * Retries while the firmware answers S_BSY, up to INIT_RETRIES times.
 * Returns 1 when the firmware reports S_OK, 0 on error or timeout.
 */
static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
                             u32 p1, u64 p2, u64 p3)
{
    register gdth_cmd_str *cmd_ptr;
    int retries,index;

    TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));

    cmd_ptr = ha->pccb;
    memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));

    /* make command */
    for (retries = INIT_RETRIES;;) {
        cmd_ptr->Service = service;
        cmd_ptr->RequestBuffer = INTERNAL_CMND;
        if (!(index=gdth_get_cmd_index(ha))) {
            TRACE(("GDT: No free command index found\n"));
            return 0;
        }
        gdth_set_sema0(ha);
        cmd_ptr->OpCode = opcode;
        cmd_ptr->BoardNode = LOCALBOARD;
        if (service == CACHESERVICE) {
            if (opcode == GDT_IOCTL) {
                /* IOCTL data is exchanged through the scratch buffer */
                cmd_ptr->u.ioctl.subfunc = p1;
                cmd_ptr->u.ioctl.channel = (u32)p2;
                cmd_ptr->u.ioctl.param_size = (u16)p3;
                cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
            } else {
                if (ha->cache_feat & GDT_64BIT) {
                    cmd_ptr->u.cache64.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache64.BlockNo = p2;
                } else {
                    cmd_ptr->u.cache.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache.BlockNo = (u32)p2;
                }
            }
        } else if (service == SCSIRAWSERVICE) {
            if (ha->raw_feat & GDT_64BIT) {
                cmd_ptr->u.raw64.direction = p1;
                cmd_ptr->u.raw64.bus = (u8)p2;
                cmd_ptr->u.raw64.target = (u8)p3;
                cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
            } else {
                cmd_ptr->u.raw.direction = p1;
                cmd_ptr->u.raw.bus = (u8)p2;
                cmd_ptr->u.raw.target = (u8)p3;
                cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
            }
        } else if (service == SCREENSERVICE) {
            if (opcode == GDT_REALTIME) {
                *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
                *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
                *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
            }
        }
        ha->cmd_len = sizeof(gdth_cmd_str);
        ha->cmd_offs_dpmem = 0;
        ha->cmd_cnt = 0;
        gdth_copy_command(ha);
        gdth_release_event(ha);
        gdth_delay(20);
        if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
            printk("GDT: Initialization error (timeout service %d)\n",service);
            return 0;
        }
        /* retry only while the firmware says "busy" */
        if (ha->status != S_BSY || --retries == 0)
            break;
        gdth_delay(1);
    }

    return (ha->status != S_OK ? 0:1);
}
1169
1170
1171/* search for devices */
1172
/* Initialize the controller's firmware services and discover the
 * attached configuration: initializes screen/cache/raw services
 * (64-bit variants first unless force_dma32), detects the SCSI
 * channels, reads cache and board info, enumerates physical and
 * logical drives, negotiates scatter/gather, applies drive
 * reservations, reads the OEM string and finally scans host drives.
 * All firmware IOCTLs share ha->pscratch as their data buffer, so the
 * order of the calls below matters — each step consumes the scratch
 * contents before the next overwrites them.
 * Returns 1 on success, 0 on a fatal initialization error.
 */
static int gdth_search_drives(gdth_ha_str *ha)
{
    u16 cdev_cnt, i;
    int ok;
    u32 bus_no, drv_cnt, drv_no, j;
    gdth_getch_str *chn;
    gdth_drlist_str *drl;
    gdth_iochan_str *ioc;
    gdth_raw_iochan_str *iocr;
    gdth_arcdl_str *alst;
    gdth_alist_str *alst2;
    gdth_oem_str_ioctl *oemstr;

    TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
    ok = 0;

    /* initialize controller services, at first: screen service */
    ha->screen_feat = 0;
    if (!force_dma32) {
        /* try the 64-bit init first; fall back below on S_NOFUNC */
        ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
        if (ok)
            ha->screen_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error screen service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));

    /* unfreeze all IOs */
    gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);

    /* initialize cache service */
    ha->cache_feat = 0;
    if (!force_dma32) {
        ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
                                                                         0, 0);
        if (ok)
            ha->cache_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error cache service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
    /* cache init reports the configured host-drive count in ha->info;
       used as the scan loop bound at the end of this function */
    cdev_cnt = (u16)ha->info;
    ha->fw_vers = ha->service;

    /* detect number of buses - try new IOCTL */
    iocr = (gdth_raw_iochan_str *)ha->pscratch;
    iocr->hdr.version = 0xffffffff;
    iocr->hdr.list_entries = MAXBUS;
    iocr->hdr.first_chan = 0;
    iocr->hdr.last_chan = MAXBUS-1;
    iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
                          INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
        TRACE2(("IOCHAN_RAW_DESC supported!\n"));
        ha->bus_cnt = iocr->hdr.chan_count;
        for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
            if (iocr->list[bus_no].proc_id < MAXID)
                ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
            else
                ha->bus_id[bus_no] = 0xff;
        }
    } else {
        /* old method */
        chn = (gdth_getch_str *)ha->pscratch;
        for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
            chn->channel_no = bus_no;
            if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                   SCSI_CHAN_CNT | L_CTRL_PATTERN,
                                   IO_CHANNEL | INVALID_CHANNEL,
                                   sizeof(gdth_getch_str))) {
                if (bus_no == 0) {
                    printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
                           ha->hanum, ha->status);
                    return 0;
                }
                break;
            }
            if (chn->siop_id < MAXID)
                ha->bus_id[bus_no] = chn->siop_id;
            else
                ha->bus_id[bus_no] = 0xff;
        }
        ha->bus_cnt = (u8)bus_no;
    }
    TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));

    /* read cache configuration */
    if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
                           INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
        printk("GDT-HA %d: Initialization error cache service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
    TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
            ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
            ha->cpar.write_back,ha->cpar.block_size));

    /* read board info and features */
    ha->more_proc = FALSE;
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
                          INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
        memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
               sizeof(gdth_binfo_str));
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
                              INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
            TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
            ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
            ha->more_proc = TRUE;
        }
    } else {
        TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
        strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
    }
    TRACE2(("Controller name: %s\n",ha->binfo.type_string));

    /* read more informations */
    if (ha->more_proc) {
        /* physical drives, channel addresses */
        ioc = (gdth_iochan_str *)ha->pscratch;
        ioc->hdr.version = 0xffffffff;
        ioc->hdr.list_entries = MAXBUS;
        ioc->hdr.first_chan = 0;
        ioc->hdr.last_chan = MAXBUS-1;
        ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
                              INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
            for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
                ha->raw[bus_no].address = ioc->list[bus_no].address;
                ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
            }
        } else {
            for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
                ha->raw[bus_no].address = IO_CHANNEL;
                ha->raw[bus_no].local_no = bus_no;
            }
        }
        for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
            chn = (gdth_getch_str *)ha->pscratch;
            chn->channel_no = ha->raw[bus_no].local_no;
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                  SCSI_CHAN_CNT | L_CTRL_PATTERN,
                                  ha->raw[bus_no].address | INVALID_CHANNEL,
                                  sizeof(gdth_getch_str))) {
                ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
                TRACE2(("Channel %d: %d phys. drives\n",
                        bus_no,chn->drive_cnt));
            }
            if (ha->raw[bus_no].pdev_cnt > 0) {
                drl = (gdth_drlist_str *)ha->pscratch;
                drl->sc_no = ha->raw[bus_no].local_no;
                drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
                if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                      SCSI_DR_LIST | L_CTRL_PATTERN,
                                      ha->raw[bus_no].address | INVALID_CHANNEL,
                                      sizeof(gdth_drlist_str))) {
                    for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
                        ha->raw[bus_no].id_list[j] = drl->sc_list[j];
                } else {
                    ha->raw[bus_no].pdev_cnt = 0;
                }
            }
        }

        /* logical drives */
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
                              INVALID_CHANNEL,sizeof(u32))) {
            drv_cnt = *(u32 *)ha->pscratch;
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
                                  INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
                for (j = 0; j < drv_cnt; ++j) {
                    drv_no = ((u32 *)ha->pscratch)[j];
                    if (drv_no < MAX_LDRIVES) {
                        ha->hdr[drv_no].is_logdrv = TRUE;
                        TRACE2(("Drive %d is log. drive\n",drv_no));
                    }
                }
            }
            /* array drive attributes: try the new list format first,
               then fall back to the old 35-entry format */
            alst = (gdth_arcdl_str *)ha->pscratch;
            alst->entries_avail = MAX_LDRIVES;
            alst->first_entry = 0;
            alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                  ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
                                  INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
                                  (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
                for (j = 0; j < alst->entries_init; ++j) {
                    ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
                    ha->hdr[j].is_master = alst->list[j].is_master;
                    ha->hdr[j].is_parity = alst->list[j].is_parity;
                    ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
                    ha->hdr[j].master_no = alst->list[j].cd_handle;
                }
            } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                         ARRAY_DRV_LIST | LA_CTRL_PATTERN,
                                         0, 35 * sizeof(gdth_alist_str))) {
                for (j = 0; j < 35; ++j) {
                    alst2 = &((gdth_alist_str *)ha->pscratch)[j];
                    ha->hdr[j].is_arraydrv = alst2->is_arrayd;
                    ha->hdr[j].is_master = alst2->is_master;
                    ha->hdr[j].is_parity = alst2->is_parity;
                    ha->hdr[j].is_hotfix = alst2->is_hotfix;
                    ha->hdr[j].master_no = alst2->cd_handle;
                }
            }
        }
    }

    /* initialize raw service */
    ha->raw_feat = 0;
    if (!force_dma32) {
        ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
        if (ok)
            ha->raw_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error raw service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));

    /* set/get features raw service (scatter/gather) */
    if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
                          0, 0)) {
        TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
        if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
            TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
                    ha->info));
            ha->raw_feat |= (u16)ha->info;
        }
    }

    /* set/get features cache service (equal to raw service) */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
                          SCATTER_GATHER,0)) {
        TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
            TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
                    ha->info));
            ha->cache_feat |= (u16)ha->info;
        }
    }

    /* reserve drives for raw service */
    if (reserve_mode != 0) {
        gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
                          reserve_mode == 1 ? 1 : 3, 0, 0);
        TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
                ha->status));
    }
    /* reserve_list holds (hanum,bus,id,lun) quadruples from the
       module/command-line parameters */
    for (i = 0; i < MAX_RES_ARGS; i += 4) {
        if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
            reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
            TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
                    reserve_list[i], reserve_list[i+1],
                    reserve_list[i+2], reserve_list[i+3]));
            if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
                                   reserve_list[i+1], reserve_list[i+2] |
                                   (reserve_list[i+3] << 8))) {
                printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
                       ha->hanum, ha->status);
            }
        }
    }

    /* Determine OEM string using IOCTL */
    oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
    oemstr->params.ctl_version = 0x01;
    oemstr->params.buffer_size = sizeof(oemstr->text);
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                          CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
                          sizeof(gdth_oem_str_ioctl))) {
        TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
        printk("GDT-HA %d: Vendor: %s Name: %s\n",
               ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
        /* Save the Host Drive inquiry data */
        strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
                sizeof(ha->oem_name));
    } else {
        /* Old method, based on PCI ID */
        TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
        printk("GDT-HA %d: Name: %s\n",
               ha->hanum, ha->binfo.type_string);
        if (ha->oem_id == OEM_ID_INTEL)
            strlcpy(ha->oem_name,"Intel  ", sizeof(ha->oem_name));
        else
            strlcpy(ha->oem_name,"ICP    ", sizeof(ha->oem_name));
    }

    /* scanning for host drives */
    for (i = 0; i < cdev_cnt; ++i)
        gdth_analyse_hdrive(ha, i);

    TRACE(("gdth_search_drives() OK\n"));
    return 1;
}
1482
/* Query the firmware about one host drive and fill in ha->hdr[hdrive]:
 * presence, size, CHS geometry, device type, cluster info and r/w
 * attributes.  Called once per configured host drive from
 * gdth_search_drives().
 * Returns 1 when the drive exists and was analysed, 0 otherwise.
 */
static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
{
    u32 drv_cyls;
    int drv_hds, drv_secs;

    TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
    if (hdrive >= MAX_HDRIVES)
        return 0;

    /* GDT_INFO reports the drive size (in sectors) in ha->info */
    if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
        return 0;
    ha->hdr[hdrive].present = TRUE;
    ha->hdr[hdrive].size = ha->info;

    /* evaluate mapping (sectors per head, heads per cylinder) */
    ha->hdr[hdrive].size &= ~SECS32;
    if (ha->info2 == 0) {
        /* no geometry from firmware: derive one from the size */
        gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
    } else {
        /* firmware-supplied geometry packed into info2 */
        drv_hds = ha->info2 & 0xff;
        drv_secs = (ha->info2 >> 8) & 0xff;
        drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
    }
    ha->hdr[hdrive].heads = (u8)drv_hds;
    ha->hdr[hdrive].secs = (u8)drv_secs;
    /* round size */
    ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;

    if (ha->cache_feat & GDT_64BIT) {
        /* 64-bit capable firmware: read the full size (high word in info2) */
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
            && ha->info2 != 0) {
            ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
        }
    }
    TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
            hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));

    /* get informations about device */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].devtype = (u16)ha->info;
    }

    /* cluster info */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
                hdrive,ha->info));
        if (!shared_access)
            ha->hdr[hdrive].cluster_type = (u8)ha->info;
    }

    /* R/W attributes */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].rw_attribs = (u8)ha->info;
    }

    return 1;
}
1544
1545
1546/* command queueing/sending functions */
1547
1548static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
1549{
1550 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1551 register struct scsi_cmnd *pscp;
1552 register struct scsi_cmnd *nscp;
1553 unsigned long flags;
1554
1555 TRACE(("gdth_putq() priority %d\n",priority));
1556 spin_lock_irqsave(&ha->smp_lock, flags);
1557
1558 if (!cmndinfo->internal_command)
1559 cmndinfo->priority = priority;
1560
1561 if (ha->req_first==NULL) {
1562 ha->req_first = scp; /* queue was empty */
1563 scp->SCp.ptr = NULL;
1564 } else { /* queue not empty */
1565 pscp = ha->req_first;
1566 nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
1567 /* priority: 0-highest,..,0xff-lowest */
1568 while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
1569 pscp = nscp;
1570 nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
1571 }
1572 pscp->SCp.ptr = (char *)scp;
1573 scp->SCp.ptr = (char *)nscp;
1574 }
1575 spin_unlock_irqrestore(&ha->smp_lock, flags);
1576
1577#ifdef GDTH_STATISTICS
1578 flags = 0;
1579 for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
1580 ++flags;
1581 if (max_rq < flags) {
1582 max_rq = flags;
1583 TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
1584 }
1585#endif
1586}
1587
/* Main command dispatcher: walk the request queue and start as many
 * commands on the controller as possible in one burst.
 * For each queued command it decides the target service (cache vs. raw),
 * handles the TEST_UNIT_READY-driven bus-scan state machine, emulates
 * some SCSI commands internally, and unlinks commands it managed to
 * start.  Stops early when the controller or command table is full
 * (this_cmd == FALSE) or when only one command may be issued
 * (next_cmd == FALSE, polled mode).  In polled mode it then waits for
 * the last issued command to complete.
 */
static void gdth_next(gdth_ha_str *ha)
{
    register struct scsi_cmnd *pscp;
    register struct scsi_cmnd *nscp;
    u8 b, t, l, firsttime;
    u8 this_cmd, next_cmd;
    unsigned long flags = 0;
    int cmd_index;

    TRACE(("gdth_next() hanum %d\n", ha->hanum));
    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
    this_cmd = firsttime = TRUE;
    next_cmd = gdth_polling ? FALSE:TRUE;
    cmd_index = 0;

    /* pscp trails nscp by one node so nscp can be unlinked below */
    for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
        struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
        if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
            pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
        if (!nscp_cmndinfo->internal_command) {
            b = nscp->device->channel;
            t = nscp->device->id;
            l = nscp->device->lun;
            if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
                /* skip commands to locked channels/host drives */
                if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
                    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
                    continue;
            }
        } else
            b = t = l = 0;

        if (firsttime) {
            if (gdth_test_busy(ha)) {        /* controller busy ? */
                TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
                if (!gdth_polling) {
                    spin_unlock_irqrestore(&ha->smp_lock, flags);
                    return;
                }
                while (gdth_test_busy(ha))
                    gdth_delay(1);
            }
            firsttime = FALSE;
        }

        if (!nscp_cmndinfo->internal_command) {
            if (nscp_cmndinfo->phase == -1) {
                nscp_cmndinfo->phase = CACHESERVICE;           /* default: cache svc. */
                if (nscp->cmnd[0] == TEST_UNIT_READY) {
                    TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
                            b, t, l));
                    /* TEST_UNIT_READY -> set scan mode */
                    if ((ha->scan_mode & 0x0f) == 0) {
                        if (b == 0 && t == 0 && l == 0) {
                            ha->scan_mode |= 1;
                            TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
                        }
                    } else if ((ha->scan_mode & 0x0f) == 1) {
                        if (b == 0 && ((t == 0 && l == 1) ||
                             (t == 1 && l == 0))) {
                            nscp_cmndinfo->OpCode = GDT_SCAN_START;
                            nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
                                | SCSIRAWSERVICE;
                            ha->scan_mode = 0x12;
                            TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
                                    ha->scan_mode));
                        } else {
                            ha->scan_mode &= 0x10;
                            TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
                        }
                    } else if (ha->scan_mode == 0x12) {
                        if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
                            nscp_cmndinfo->phase = SCSIRAWSERVICE;
                            nscp_cmndinfo->OpCode = GDT_SCAN_END;
                            ha->scan_mode &= 0x10;
                            TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
                                    ha->scan_mode));
                        }
                    }
                }
                if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
                    nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
                    (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
                    /* always GDT_CLUST_INFO! */
                    nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
                }
            }
        }

        if (nscp_cmndinfo->OpCode != -1) {
            /* special (driver-internal) opcode set above or by ioctl */
            if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
                if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                next_cmd = FALSE;
            } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
                if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
                    this_cmd = FALSE;
                next_cmd = FALSE;
            } else {
                memset((char*)nscp->sense_buffer,0,16);
                nscp->sense_buffer[0] = 0x70;
                nscp->sense_buffer[2] = NOT_READY;
                nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                if (!nscp_cmndinfo->wait_for_completion)
                    nscp_cmndinfo->wait_for_completion++;
                else
                    gdth_scsi_done(nscp);
            }
        } else if (gdth_cmnd_priv(nscp)->internal_command) {
            if (!(cmd_index=gdth_special_cmd(ha, nscp)))
                this_cmd = FALSE;
            next_cmd = FALSE;
        } else if (b != ha->virt_bus) {
            /* raw (pass-through) channel; throttle per-target I/O */
            if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
                !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
                this_cmd = FALSE;
            else
                ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
        } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
            TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
                    nscp->cmnd[0], b, t, l));
            nscp->result = DID_BAD_TARGET << 16;
            if (!nscp_cmndinfo->wait_for_completion)
                nscp_cmndinfo->wait_for_completion++;
            else
                gdth_scsi_done(nscp);
        } else {
            /* command to a host drive on the virtual bus */
            switch (nscp->cmnd[0]) {
              case TEST_UNIT_READY:
              case INQUIRY:
              case REQUEST_SENSE:
              case READ_CAPACITY:
              case VERIFY:
              case START_STOP:
              case MODE_SENSE:
              case SERVICE_ACTION_IN_16:
                TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
                       nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                       nscp->cmnd[4],nscp->cmnd[5]));
                if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
                    /* return UNIT_ATTENTION */
                    TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
                             nscp->cmnd[0], t));
                    ha->hdr[t].media_changed = FALSE;
                    memset((char*)nscp->sense_buffer,0,16);
                    nscp->sense_buffer[0] = 0x70;
                    nscp->sense_buffer[2] = UNIT_ATTENTION;
                    nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else if (gdth_internal_cache_cmd(ha, nscp))
                    gdth_scsi_done(nscp);
                break;

              case ALLOW_MEDIUM_REMOVAL:
                TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
                       nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                       nscp->cmnd[4],nscp->cmnd[5]));
                if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
                    TRACE(("Prevent r. nonremov. drive->do nothing\n"));
                    nscp->result = DID_OK << 16;
                    nscp->sense_buffer[0] = 0;
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else {
                    nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
                    TRACE(("Prevent/allow r. %d rem. drive %d\n",
                           nscp->cmnd[4],nscp->cmnd[3]));
                    if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                        this_cmd = FALSE;
                }
                break;

              case RESERVE:
              case RELEASE:
                TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
                        "RESERVE" : "RELEASE"));
                if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

              case READ_6:
              case WRITE_6:
              case READ_10:
              case WRITE_10:
              case READ_16:
              case WRITE_16:
                if (ha->hdr[t].media_changed) {
                    /* return UNIT_ATTENTION */
                    TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
                             nscp->cmnd[0], t));
                    ha->hdr[t].media_changed = FALSE;
                    memset((char*)nscp->sense_buffer,0,16);
                    nscp->sense_buffer[0] = 0x70;
                    nscp->sense_buffer[2] = UNIT_ATTENTION;
                    nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

              default:
                TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
                        nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                        nscp->cmnd[4],nscp->cmnd[5]));
                printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
                       ha->hanum, nscp->cmnd[0]);
                nscp->result = DID_ABORT << 16;
                if (!nscp_cmndinfo->wait_for_completion)
                    nscp_cmndinfo->wait_for_completion++;
                else
                    gdth_scsi_done(nscp);
                break;
            }
        }

        if (!this_cmd)
            break;
        /* command was started (or completed internally): unlink it */
        if (nscp == ha->req_first)
            ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
        else
            pscp->SCp.ptr = nscp->SCp.ptr;
        if (!next_cmd)
            break;
    }

    if (ha->cmd_cnt > 0) {
        /* ring the doorbell once for the whole burst */
        gdth_release_event(ha);
    }

    if (!gdth_polling)
        spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (gdth_polling && ha->cmd_cnt > 0) {
        if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
            printk("GDT-HA %d: Command %d timed out !\n",
                   ha->hanum, cmd_index);
    }
}
1836
1837/*
1838 * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
1839 * buffers, kmap_atomic() as needed.
1840 */
static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
                                    char *buffer, u16 count)
{
    u16 cpcount,i, max_sg = scsi_sg_count(scp);
    u16 cpsum,cpnow;
    struct scatterlist *sl;
    char *address;

    /* never copy more than the command's data buffer can hold */
    cpcount = min_t(u16, count, scsi_bufflen(scp));

    if (cpcount) {
        cpsum=0;
        /* spread the source buffer over the command's s/g list */
        scsi_for_each_sg(scp, sl, max_sg, i) {
            unsigned long flags;
            cpnow = (u16)sl->length;
            TRACE(("copy_internal() now %d sum %d count %d %d\n",
                          cpnow, cpsum, cpcount, scsi_bufflen(scp)));
            if (cpsum+cpnow > cpcount)
                cpnow = cpcount - cpsum;
            cpsum += cpnow;
            if (!sg_page(sl)) {
                printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
                       ha->hanum);
                return;
            }
            /* irqs disabled around the atomic kmap so the mapping
               cannot be preempted away */
            local_irq_save(flags);
            address = kmap_atomic(sg_page(sl)) + sl->offset;
            memcpy(address, buffer, cpnow);
            flush_dcache_page(sg_page(sl));
            kunmap_atomic(address);
            local_irq_restore(flags);
            if (cpsum == cpcount)
                break;
            buffer += cpnow;
        }
    } else if (count) {
        printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
               ha->hanum);
        WARN_ON(1);
    }
}
1882
/* Emulate simple SCSI commands (INQUIRY, REQUEST SENSE, MODE SENSE,
 * READ CAPACITY, ...) for host drives directly in the driver, without
 * involving the controller firmware.  Fills scp->result and, where
 * needed, copies the emulated response into the command's buffers.
 * Returns 1 when the caller should complete the command via
 * gdth_scsi_done(), 0 when completion is deferred
 * (wait_for_completion was still unset).
 */
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
    u8 t;
    gdth_inq_data inq;
    gdth_rdcap_data rdc;
    gdth_sense_data sd;
    gdth_modep_data mpd;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);

    t  = scp->device->id;
    TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
           scp->cmnd[0],t));

    scp->result = DID_OK << 16;
    scp->sense_buffer[0] = 0;

    switch (scp->cmnd[0]) {
      case TEST_UNIT_READY:
      case VERIFY:
      case START_STOP:
        /* nothing to do for a ready host drive: report success */
        TRACE2(("Test/Verify/Start hdrive %d\n",t));
        break;

      case INQUIRY:
        TRACE2(("Inquiry hdrive %d devtype %d\n",
                t,ha->hdr[t].devtype));
        inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
        /* you can here set all disks to removable, if you want to do
           a flush using the ALLOW_MEDIUM_REMOVAL command */
        inq.modif_rmb = 0x00;
        if ((ha->hdr[t].devtype & 1) ||
            (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
            inq.modif_rmb = 0x80;
        inq.version   = 2;
        inq.resp_aenc = 2;
        inq.add_length= 32;
        strcpy(inq.vendor,ha->oem_name);
        snprintf(inq.product, sizeof(inq.product), "Host Drive  #%02d",t);
        strcpy(inq.revision,"   ");
        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
        break;

      case REQUEST_SENSE:
        /* no pending sense data: report NO_SENSE */
        TRACE2(("Request sense hdrive %d\n",t));
        sd.errorcode = 0x70;
        sd.segno     = 0x00;
        sd.key       = NO_SENSE;
        sd.info      = 0;
        sd.add_length= 0;
        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
        break;

      case MODE_SENSE:
        TRACE2(("Mode sense hdrive %d\n",t));
        memset((char*)&mpd,0,sizeof(gdth_modep_data));
        mpd.hd.data_length = sizeof(gdth_modep_data);
        mpd.hd.dev_par     = (ha->hdr[t].devtype&2) ? 0x80:0;
        mpd.hd.bd_length   = sizeof(mpd.bd);
        mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
        mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
        mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
        gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
        break;

      case READ_CAPACITY:
        TRACE2(("Read capacity hdrive %d\n",t));
        /* drives > 2 TB are clamped; READ CAPACITY (16) handles them */
        if (ha->hdr[t].size > (u64)0xffffffff)
            rdc.last_block_no = 0xffffffff;
        else
            rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
        rdc.block_length  = cpu_to_be32(SECTOR_SIZE);
        gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
        break;

      case SERVICE_ACTION_IN_16:
        if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
            (ha->cache_feat & GDT_64BIT)) {
            gdth_rdcap16_data rdc16;

            TRACE2(("Read capacity (16) hdrive %d\n",t));
            rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
            rdc16.block_length  = cpu_to_be32(SECTOR_SIZE);
            gdth_copy_internal_data(ha, scp, (char*)&rdc16,
                                                 sizeof(gdth_rdcap16_data));
        } else {
            scp->result = DID_ABORT << 16;
        }
        break;

      default:
        TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
        break;
    }

    if (!cmndinfo->wait_for_completion)
        cmndinfo->wait_for_completion++;
    else
        return 1;

    return 0;
}
1984
/*
 * gdth_fill_cache_cmd() - build a cache service (host drive) command.
 * @ha:     controller state
 * @scp:    SCSI command to translate
 * @hdrive: host drive number the command is addressed to
 *
 * Translates the SCSI CDB into a GDT firmware cache command: READ/WRITE
 * (6/10/12/16-byte CDBs), RESERVE/RELEASE, ALLOW_MEDIUM_REMOVAL
 * (mount/unmount/flush), or a special opcode preset in the per-command
 * private data.  Maps the scatter-gather list for DMA and copies the
 * finished command into the controller's command area.
 *
 * Returns the allocated command index (> 0) on success, or 0 when no
 * free command index exists or the command would overflow the DPMEM
 * command area.
 */
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
                                                u16 hdrive)
{
    register gdth_cmd_str *cmdp;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    u32 cnt, blockcnt;
    u64 no, blockno;
    int i, cmd_index, read_write, sgcnt, mode64;

    cmdp = ha->pccb;
    TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
           scp->cmnd[0],scp->cmd_len,hdrive));

    /* use the 64-bit command layout only if the firmware supports it */
    mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
    /* test for READ_16, WRITE_16 if !mode64 ? ---
       not required, should not occur due to error return on
       READ_CAPACITY_16 */

    cmdp->Service = CACHESERVICE;
    cmdp->RequestBuffer = scp;
    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }
    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    /* fill command: map the SCSI opcode onto the firmware opcode */
    read_write = 0;                 /* 0 = no data transfer, 1 = write, 2 = read */
    if (cmndinfo->OpCode != -1)
        cmdp->OpCode = cmndinfo->OpCode;   /* special cache cmd. */
    else if (scp->cmnd[0] == RESERVE)
        cmdp->OpCode = GDT_RESERVE_DRV;
    else if (scp->cmnd[0] == RELEASE)
        cmdp->OpCode = GDT_RELEASE_DRV;
    else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
        if (scp->cmnd[4] & 1)                   /* prevent ? */
            cmdp->OpCode = GDT_MOUNT;
        else if (scp->cmnd[3] & 1)              /* removable drive ? */
            cmdp->OpCode = GDT_UNMOUNT;
        else
            cmdp->OpCode = GDT_FLUSH;
    } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
               scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
    ) {
        read_write = 1;
        /* write-through if requested globally, or if the drive attribute
           asks for it and the firmware supports it */
        if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
                                   (ha->cache_feat & GDT_WR_THROUGH)))
            cmdp->OpCode = GDT_WRITE_THR;
        else
            cmdp->OpCode = GDT_WRITE;
    } else {
        read_write = 2;
        cmdp->OpCode = GDT_READ;
    }

    cmdp->BoardNode = LOCALBOARD;
    if (mode64) {
        cmdp->u.cache64.DeviceNo = hdrive;
        cmdp->u.cache64.BlockNo = 1;
        cmdp->u.cache64.sg_canz = 0;
    } else {
        cmdp->u.cache.DeviceNo = hdrive;
        cmdp->u.cache.BlockNo = 1;
        cmdp->u.cache.sg_canz = 0;
    }

    if (read_write) {
        /* extract LBA and block count from the CDB; the layout depends on
           the CDB length (16/10/6-byte READ/WRITE variants) */
        if (scp->cmd_len == 16) {
            memcpy(&no, &scp->cmnd[2], sizeof(u64));
            blockno = be64_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
            blockcnt = be32_to_cpu(cnt);
        } else if (scp->cmd_len == 10) {
            memcpy(&no, &scp->cmnd[2], sizeof(u32));
            blockno = be32_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
            blockcnt = be16_to_cpu(cnt);
        } else {
            /* 6-byte CDB: 21-bit LBA; a count byte of 0 means 256 blocks */
            memcpy(&no, &scp->cmnd[0], sizeof(u32));
            blockno = be32_to_cpu(no) & 0x001fffffUL;
            blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
        }
        if (mode64) {
            cmdp->u.cache64.BlockNo = blockno;
            cmdp->u.cache64.BlockCnt = blockcnt;
        } else {
            cmdp->u.cache.BlockNo = (u32)blockno;
            cmdp->u.cache.BlockCnt = blockcnt;
        }

        if (scsi_bufflen(scp)) {
            cmndinfo->dma_dir = (read_write == 1 ?
                DMA_TO_DEVICE : DMA_FROM_DEVICE);
            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
                               scsi_sg_count(scp), cmndinfo->dma_dir);
            if (mode64) {
                struct scatterlist *sl;

                /* DestAddr == -1 tells the firmware to use the SG list */
                cmdp->u.cache64.DestAddr= (u64)-1;
                cmdp->u.cache64.sg_canz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                struct scatterlist *sl;

                /* DestAddr == 0xffffffff tells the firmware to use the SG list */
                cmdp->u.cache.DestAddr= 0xffffffff;
                cmdp->u.cache.sg_canz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            /* track the largest SG list seen so far */
            if (max_sg < (u32)sgcnt) {
                max_sg = (u32)sgcnt;
                TRACE3(("GDT: max_sg = %d\n",max_sg));
            }
#endif

        }
    }
    /* evaluate command size, check space */
    if (mode64) {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
               cmdp->u.cache64.sg_lst[0].sg_ptr,
               cmdp->u.cache64.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
            (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
    } else {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
               cmdp->u.cache.sg_lst[0].sg_ptr,
               cmdp->u.cache.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
            (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
    }
    /* round the command length up to a 32-bit boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* queued behind other commands: verify it still fits into DPMEM;
           on overflow release the command slot again (indices 0/1 are
           reserved, so the table is offset by 2) */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2148
/*
 * gdth_fill_raw_cmd() - build a raw (SCSI pass-through) service command.
 * @ha:  controller state
 * @scp: SCSI command to pass through
 * @b:   controller bus (channel) number
 *
 * Either forwards a preset special raw opcode from the per-command
 * private data, or wraps the original CDB in a GDT raw command with the
 * sense buffer and the scatter-gather list mapped for DMA, then copies
 * the finished command into the controller's command area.
 *
 * Returns the allocated command index (> 0) on success, or 0 when no
 * free command index exists or the command would overflow the DPMEM
 * command area.
 */
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
    register gdth_cmd_str *cmdp;
    u16 i;
    dma_addr_t sense_paddr;
    int cmd_index, sgcnt, mode64;
    u8 t,l;
    struct gdth_cmndinfo *cmndinfo;

    t = scp->device->id;
    l = scp->device->lun;
    cmdp = ha->pccb;
    TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
           scp->cmnd[0],b,t,l));

    /* use the 64-bit command layout only if the firmware supports it */
    mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;

    cmdp->Service = SCSIRAWSERVICE;
    cmdp->RequestBuffer = scp;
    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }
    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    cmndinfo = gdth_cmnd_priv(scp);
    /* fill command */
    if (cmndinfo->OpCode != -1) {
        /* special raw command: opcode and direction come from the
           per-command private data, no CDB/sense/SG setup needed */
        cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
        cmdp->BoardNode = LOCALBOARD;
        if (mode64) {
            cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw64.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
        } else {
            cmdp->u.raw.direction = (cmndinfo->phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
        }

    } else {
        /* regular pass-through: map 16 bytes of sense buffer for DMA.
           NOTE(review): mapped here with dma_map_single() but released
           in gdth_sync_event() with dma_unmap_page() -- mapping/unmapping
           API mismatch, verify against the DMA API rules */
        sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
                                     DMA_FROM_DEVICE);

        cmndinfo->sense_paddr  = sense_paddr;
        cmdp->OpCode           = GDT_WRITE;             /* always */
        cmdp->BoardNode        = LOCALBOARD;
        if (mode64) {
            cmdp->u.raw64.reserved   = 0;
            cmdp->u.raw64.mdisc_time = 0;
            cmdp->u.raw64.mcon_time  = 0;
            cmdp->u.raw64.clen       = scp->cmd_len;
            cmdp->u.raw64.target     = t;
            cmdp->u.raw64.lun        = l;
            cmdp->u.raw64.bus        = b;
            cmdp->u.raw64.priority   = 0;
            cmdp->u.raw64.sdlen      = scsi_bufflen(scp);
            cmdp->u.raw64.sense_len  = 16;
            cmdp->u.raw64.sense_data = sense_paddr;
            /* transfer direction from the per-opcode direction table */
            cmdp->u.raw64.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
            cmdp->u.raw64.sg_ranz    = 0;
        } else {
            cmdp->u.raw.reserved   = 0;
            cmdp->u.raw.mdisc_time = 0;
            cmdp->u.raw.mcon_time  = 0;
            cmdp->u.raw.clen       = scp->cmd_len;
            cmdp->u.raw.target     = t;
            cmdp->u.raw.lun        = l;
            cmdp->u.raw.bus        = b;
            cmdp->u.raw.priority   = 0;
            cmdp->u.raw.link_p     = 0;
            cmdp->u.raw.sdlen      = scsi_bufflen(scp);
            cmdp->u.raw.sense_len  = 16;
            cmdp->u.raw.sense_data = sense_paddr;
            /* transfer direction from the per-opcode direction table */
            cmdp->u.raw.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
            cmdp->u.raw.sg_ranz    = 0;
        }

        if (scsi_bufflen(scp)) {
            cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
                               scsi_sg_count(scp), cmndinfo->dma_dir);
            if (mode64) {
                struct scatterlist *sl;

                /* sdata == -1 tells the firmware to use the SG list */
                cmdp->u.raw64.sdata = (u64)-1;
                cmdp->u.raw64.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                struct scatterlist *sl;

                /* sdata == 0xffffffff tells the firmware to use the SG list */
                cmdp->u.raw.sdata = 0xffffffff;
                cmdp->u.raw.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            /* track the largest SG list seen so far */
            if (max_sg < sgcnt) {
                max_sg = sgcnt;
                TRACE3(("GDT: max_sg = %d\n",sgcnt));
            }
#endif

        }
        if (mode64) {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
                   cmdp->u.raw64.sg_lst[0].sg_ptr,
                   cmdp->u.raw64.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
                (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
        } else {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
                   cmdp->u.raw.sg_lst[0].sg_ptr,
                   cmdp->u.raw.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
                (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
        }
    }
    /* check space */
    /* round the command length up to a 32-bit boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* queued behind other commands: verify it still fits into DPMEM;
           on overflow release the command slot again (indices 0/1 are
           reserved, so the table is offset by 2) */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2305
/*
 * gdth_special_cmd() - queue a prebuilt internal firmware command.
 * @ha:  controller state
 * @scp: SCSI command whose private data carries the prebuilt command
 *
 * Copies the command stored in cmndinfo->internal_cmd_str into the
 * controller command buffer, allocates a command index, derives the
 * command length from opcode/service (IOCTL, cache or raw service, 32-
 * or 64-bit layout) and copies the command into DPMEM.
 *
 * Returns the allocated command index (> 0) on success, or 0 when no
 * free command index exists or the command would overflow DPMEM.
 */
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
    register gdth_cmd_str *cmdp;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    int cmd_index;

    cmdp= ha->pccb;
    TRACE2(("gdth_special_cmd(): "));

    /* take over the prebuilt command, but deliver the answer to this scp */
    *cmdp = *cmndinfo->internal_cmd_str;
    cmdp->RequestBuffer = scp;

    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }

    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    /* evaluate command size, check space */
    if (cmdp->OpCode == GDT_IOCTL) {
        TRACE2(("IOCTL\n"));
        ha->cmd_len =
            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
    } else if (cmdp->Service == CACHESERVICE) {
        TRACE2(("cache command %d\n",cmdp->OpCode));
        if (ha->cache_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
    } else if (cmdp->Service == SCSIRAWSERVICE) {
        TRACE2(("raw command %d\n",cmdp->OpCode));
        if (ha->raw_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
    }

    /* round the command length up to a 32-bit boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* queued behind other commands: verify it still fits into DPMEM;
           on overflow release the command slot again (indices 0/1 are
           reserved, so the table is offset by 2) */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2367
2368
2369/* Controller event handling functions */
2370static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
2371 u16 idx, gdth_evt_data *evt)
2372{
2373 gdth_evt_str *e;
2374
2375 /* no GDTH_LOCK_HA() ! */
2376 TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
2377 if (source == 0) /* no source -> no event */
2378 return NULL;
2379
2380 if (ebuffer[elastidx].event_source == source &&
2381 ebuffer[elastidx].event_idx == idx &&
2382 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
2383 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
2384 (char *)&evt->eu, evt->size)) ||
2385 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
2386 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
2387 (char *)&evt->event_string)))) {
2388 e = &ebuffer[elastidx];
2389 e->last_stamp = (u32)ktime_get_real_seconds();
2390 ++e->same_count;
2391 } else {
2392 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
2393 ++elastidx;
2394 if (elastidx == MAX_EVENTS)
2395 elastidx = 0;
2396 if (elastidx == eoldidx) { /* reached mark ? */
2397 ++eoldidx;
2398 if (eoldidx == MAX_EVENTS)
2399 eoldidx = 0;
2400 }
2401 }
2402 e = &ebuffer[elastidx];
2403 e->event_source = source;
2404 e->event_idx = idx;
2405 e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
2406 e->same_count = 1;
2407 e->event_data = *evt;
2408 e->application = 0;
2409 }
2410 return e;
2411}
2412
2413static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2414{
2415 gdth_evt_str *e;
2416 int eindex;
2417 unsigned long flags;
2418
2419 TRACE2(("gdth_read_event() handle %d\n", handle));
2420 spin_lock_irqsave(&ha->smp_lock, flags);
2421 if (handle == -1)
2422 eindex = eoldidx;
2423 else
2424 eindex = handle;
2425 estr->event_source = 0;
2426
2427 if (eindex < 0 || eindex >= MAX_EVENTS) {
2428 spin_unlock_irqrestore(&ha->smp_lock, flags);
2429 return eindex;
2430 }
2431 e = &ebuffer[eindex];
2432 if (e->event_source != 0) {
2433 if (eindex != elastidx) {
2434 if (++eindex == MAX_EVENTS)
2435 eindex = 0;
2436 } else {
2437 eindex = -1;
2438 }
2439 memcpy(estr, e, sizeof(gdth_evt_str));
2440 }
2441 spin_unlock_irqrestore(&ha->smp_lock, flags);
2442 return eindex;
2443}
2444
2445static void gdth_readapp_event(gdth_ha_str *ha,
2446 u8 application, gdth_evt_str *estr)
2447{
2448 gdth_evt_str *e;
2449 int eindex;
2450 unsigned long flags;
2451 u8 found = FALSE;
2452
2453 TRACE2(("gdth_readapp_event() app. %d\n", application));
2454 spin_lock_irqsave(&ha->smp_lock, flags);
2455 eindex = eoldidx;
2456 for (;;) {
2457 e = &ebuffer[eindex];
2458 if (e->event_source == 0)
2459 break;
2460 if ((e->application & application) == 0) {
2461 e->application |= application;
2462 found = TRUE;
2463 break;
2464 }
2465 if (eindex == elastidx)
2466 break;
2467 if (++eindex == MAX_EVENTS)
2468 eindex = 0;
2469 }
2470 if (found)
2471 memcpy(estr, e, sizeof(gdth_evt_str));
2472 else
2473 estr->event_source = 0;
2474 spin_unlock_irqrestore(&ha->smp_lock, flags);
2475}
2476
2477static void gdth_clear_events(void)
2478{
2479 TRACE(("gdth_clear_events()"));
2480
2481 eoldidx = elastidx = 0;
2482 ebuffer[0].event_source = 0;
2483}
2484
2485
2486/* SCSI interface functions */
2487
/*
 * __gdth_interrupt() - common interrupt service worker.
 * @ha:             controller that raised (or is being polled for) an
 *                  interrupt
 * @gdth_from_wait: true when called from the polling wait loop
 * @pIndex:         out parameter: command index read from the controller
 *                  (written only when called from the wait loop)
 *
 * Reads the controller status, fetches the status/info/service registers
 * using the access method of the specific board family, acknowledges the
 * interrupt, and dispatches the event: asynchronous controller events,
 * "service unknown" notifications, answers to internal commands, and
 * completions of regular SCSI commands (via gdth_sync_event()).
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
                                    int gdth_from_wait, int* pIndex)
{
    gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
    gdt6_dpram_str __iomem *dp6_ptr;
    struct scsi_cmnd *scp;
    int rval, i;
    u8 IStatus;
    u16 Service;
    unsigned long flags = 0;

    TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));

    /* if polling and not from gdth_wait() -> return */
    if (gdth_polling) {
        if (!gdth_from_wait) {
            return IRQ_HANDLED;
        }
    }

    /* in polling mode the wait loop runs with interrupts handled inline,
       so the lock is only taken on the real IRQ path */
    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    /* search controller */
    IStatus = gdth_get_status(ha);
    if (IStatus == 0) {
        /* spurious interrupt */
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

#ifdef GDTH_STATISTICS
    ++act_ints;
#endif

    /* board-family specific register access: read status/info/service,
       then acknowledge the interrupt and reset the status semaphore */
    if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = readw(&dp6_ptr->u.ic.Status);
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else                                      /* no error */
            ha->status = S_OK;
        ha->info = readl(&dp6_ptr->u.ic.Info[0]);
        ha->service = readw(&dp6_ptr->u.ic.Service);
        ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);

        writeb(0xff, &dp6_ptr->io.irqdel);          /* acknowledge interrupt */
        writeb(0, &dp6_ptr->u.ic.Cmd_Index);        /* reset command index */
        writeb(0, &dp6_ptr->io.Sema1);              /* reset status semaphore */
    } else if (ha->type == GDT_PCINEW) {
        /* PCI "new" boards are accessed through PLX I/O ports */
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = inw(PTR2USHORT(&ha->plx->status));
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else
            ha->status = S_OK;
        ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
        ha->service = inw(PTR2USHORT(&ha->plx->service));
        ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));

        outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
        outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
    } else if (ha->type == GDT_PCIMPR) {
        /* PCI MPR boards use the i960 register window */
        dp6m_ptr = ha->brd;
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = readw(&dp6m_ptr->i960r.status);
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else                                      /* no error */
            ha->status = S_OK;

        ha->info = readl(&dp6m_ptr->i960r.info[0]);
        ha->service = readw(&dp6m_ptr->i960r.service);
        ha->info2 = readl(&dp6m_ptr->i960r.info[1]);

        /* event string */
        if (IStatus == ASYNCINDEX) {
            if (ha->service != SCREENSERVICE &&
                (ha->fw_vers & 0xff) >= 0x1a) {
                /* firmware >= 0x1a delivers a severity byte and a
                   NUL-terminated event string (max 256 bytes) */
                ha->dvr.severity = readb
                    (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
                for (i = 0; i < 256; ++i) {
                    ha->dvr.event_string[i] = readb
                        (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
                    if (ha->dvr.event_string[i] == 0)
                        break;
                }
            }
        }
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        writeb(0, &dp6m_ptr->i960r.sema1_reg);
    } else {
        TRACE2(("gdth_interrupt() unknown controller type\n"));
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

    TRACE(("gdth_interrupt() index %d stat %d info %d\n",
           IStatus,ha->status,ha->info));

    /* report the command index back to the polling wait loop */
    if (gdth_from_wait) {
        *pIndex = (int)IStatus;
    }

    if (IStatus == ASYNCINDEX) {
        /* asynchronous controller event, not tied to a queued command */
        TRACE2(("gdth_interrupt() async. event\n"));
        gdth_async_event(ha);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        gdth_next(ha);
        return IRQ_HANDLED;
    }

    if (IStatus == SPEZINDEX) {
        /* firmware signals an unknown/uninitialized service: log it */
        TRACE2(("Service unknown or not initialized !\n"));
        ha->dvr.size = sizeof(ha->dvr.eu.driver);
        ha->dvr.eu.driver.ionode = ha->hanum;
        gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }
    /* command indices 0/1 are reserved, so the table is offset by 2 */
    scp     = ha->cmd_tab[IStatus-2].cmnd;
    Service = ha->cmd_tab[IStatus-2].service;
    ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
    if (scp == UNUSED_CMND) {
        /* completion for a slot we do not consider in use: log it */
        TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
        ha->dvr.size = sizeof(ha->dvr.eu.driver);
        ha->dvr.eu.driver.ionode = ha->hanum;
        ha->dvr.eu.driver.index = IStatus;
        gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }
    if (scp == INTERNAL_CMND) {
        /* internal commands are completed by their polling callers */
        TRACE(("gdth_interrupt() answer to internal command\n"));
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

    TRACE(("gdth_interrupt() sync. status\n"));
    rval = gdth_sync_event(ha,Service,IStatus,scp);
    if (!gdth_polling)
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    if (rval == 2) {
        /* retry: requeue the command with its recorded priority */
        gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
    } else if (rval == 1) {
        /* finished: complete the SCSI command */
        gdth_scsi_done(scp);
    }

    gdth_next(ha);
    return IRQ_HANDLED;
}
2646
2647static irqreturn_t gdth_interrupt(int irq, void *dev_id)
2648{
2649 gdth_ha_str *ha = dev_id;
2650
2651 return __gdth_interrupt(ha, false, NULL);
2652}
2653
2654static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
2655 struct scsi_cmnd *scp)
2656{
2657 gdth_msg_str *msg;
2658 gdth_cmd_str *cmdp;
2659 u8 b, t;
2660 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2661
2662 cmdp = ha->pccb;
2663 TRACE(("gdth_sync_event() serv %d status %d\n",
2664 service,ha->status));
2665
2666 if (service == SCREENSERVICE) {
2667 msg = ha->pmsg;
2668 TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
2669 msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
2670 if (msg->msg_len > MSGLEN+1)
2671 msg->msg_len = MSGLEN+1;
2672 if (msg->msg_len)
2673 if (!(msg->msg_answer && msg->msg_ext)) {
2674 msg->msg_text[msg->msg_len] = '\0';
2675 printk("%s",msg->msg_text);
2676 }
2677
2678 if (msg->msg_ext && !msg->msg_answer) {
2679 while (gdth_test_busy(ha))
2680 gdth_delay(0);
2681 cmdp->Service = SCREENSERVICE;
2682 cmdp->RequestBuffer = SCREEN_CMND;
2683 gdth_get_cmd_index(ha);
2684 gdth_set_sema0(ha);
2685 cmdp->OpCode = GDT_READ;
2686 cmdp->BoardNode = LOCALBOARD;
2687 cmdp->u.screen.reserved = 0;
2688 cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
2689 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
2690 ha->cmd_offs_dpmem = 0;
2691 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
2692 + sizeof(u64);
2693 ha->cmd_cnt = 0;
2694 gdth_copy_command(ha);
2695 gdth_release_event(ha);
2696 return 0;
2697 }
2698
2699 if (msg->msg_answer && msg->msg_alen) {
2700 /* default answers (getchar() not possible) */
2701 if (msg->msg_alen == 1) {
2702 msg->msg_alen = 0;
2703 msg->msg_len = 1;
2704 msg->msg_text[0] = 0;
2705 } else {
2706 msg->msg_alen -= 2;
2707 msg->msg_len = 2;
2708 msg->msg_text[0] = 1;
2709 msg->msg_text[1] = 0;
2710 }
2711 msg->msg_ext = 0;
2712 msg->msg_answer = 0;
2713 while (gdth_test_busy(ha))
2714 gdth_delay(0);
2715 cmdp->Service = SCREENSERVICE;
2716 cmdp->RequestBuffer = SCREEN_CMND;
2717 gdth_get_cmd_index(ha);
2718 gdth_set_sema0(ha);
2719 cmdp->OpCode = GDT_WRITE;
2720 cmdp->BoardNode = LOCALBOARD;
2721 cmdp->u.screen.reserved = 0;
2722 cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
2723 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
2724 ha->cmd_offs_dpmem = 0;
2725 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
2726 + sizeof(u64);
2727 ha->cmd_cnt = 0;
2728 gdth_copy_command(ha);
2729 gdth_release_event(ha);
2730 return 0;
2731 }
2732 printk("\n");
2733
2734 } else {
2735 b = scp->device->channel;
2736 t = scp->device->id;
2737 if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
2738 ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
2739 }
2740 /* cache or raw service */
2741 if (ha->status == S_BSY) {
2742 TRACE2(("Controller busy -> retry !\n"));
2743 if (cmndinfo->OpCode == GDT_MOUNT)
2744 cmndinfo->OpCode = GDT_CLUST_INFO;
2745 /* retry */
2746 return 2;
2747 }
2748 if (scsi_bufflen(scp))
2749 dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
2750 cmndinfo->dma_dir);
2751
2752 if (cmndinfo->sense_paddr)
2753 dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
2754 DMA_FROM_DEVICE);
2755
2756 if (ha->status == S_OK) {
2757 cmndinfo->status = S_OK;
2758 cmndinfo->info = ha->info;
2759 if (cmndinfo->OpCode != -1) {
2760 TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
2761 cmndinfo->OpCode));
2762 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
2763 if (cmndinfo->OpCode == GDT_CLUST_INFO) {
2764 ha->hdr[t].cluster_type = (u8)ha->info;
2765 if (!(ha->hdr[t].cluster_type &
2766 CLUSTER_MOUNTED)) {
2767 /* NOT MOUNTED -> MOUNT */
2768 cmndinfo->OpCode = GDT_MOUNT;
2769 if (ha->hdr[t].cluster_type &
2770 CLUSTER_RESERVED) {
2771 /* cluster drive RESERVED (on the other node) */
2772 cmndinfo->phase = -2; /* reservation conflict */
2773 }
2774 } else {
2775 cmndinfo->OpCode = -1;
2776 }
2777 } else {
2778 if (cmndinfo->OpCode == GDT_MOUNT) {
2779 ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
2780 ha->hdr[t].media_changed = TRUE;
2781 } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
2782 ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
2783 ha->hdr[t].media_changed = TRUE;
2784 }
2785 cmndinfo->OpCode = -1;
2786 }
2787 /* retry */
2788 cmndinfo->priority = HIGH_PRI;
2789 return 2;
2790 } else {
2791 /* RESERVE/RELEASE ? */
2792 if (scp->cmnd[0] == RESERVE) {
2793 ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
2794 } else if (scp->cmnd[0] == RELEASE) {
2795 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
2796 }
2797