1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2016 Microsemi Corporation
4 * Copyright 2014-2015 PMC-Sierra, Inc.
5 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 *
16 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/pci-aspm.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/fs.h>
29#include <linux/timer.h>
30#include <linux/init.h>
31#include <linux/spinlock.h>
32#include <linux/compat.h>
33#include <linux/blktrace_api.h>
34#include <linux/uaccess.h>
35#include <linux/io.h>
36#include <linux/dma-mapping.h>
37#include <linux/completion.h>
38#include <linux/moduleparam.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_eh.h>
45#include <scsi/scsi_transport_sas.h>
46#include <scsi/scsi_dbg.h>
47#include <linux/cciss_ioctl.h>
48#include <linux/string.h>
49#include <linux/bitmap.h>
50#include <linux/atomic.h>
51#include <linux/jiffies.h>
52#include <linux/percpu-defs.h>
53#include <linux/percpu.h>
54#include <asm/unaligned.h>
55#include <asm/div64.h>
56#include "hpsa_cmd.h"
57#include "hpsa.h"
58
59/*
60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
61 * with an optional trailing '-' followed by a byte value (0-255).
62 */
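/* e.g. "3.4.20-125" encodes version bytes 3, 4 and 20 with trailing byte 125. */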
63#define HPSA_DRIVER_VERSION "3.4.20-125"
64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65#define HPSA "hpsa"
66
67/* How long to wait for CISS doorbell communication */
68#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
69#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
70#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
71#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
72#define MAX_IOCTL_CONFIG_WAIT 1000
73
/* define how many times we will try a command because of bus resets */
75#define MAX_CMD_RETRIES 3
76
77/* Embedded module documentation macros - see modules.h */
78MODULE_AUTHOR("Hewlett-Packard Company");
79MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
80 HPSA_DRIVER_VERSION);
81MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
82MODULE_VERSION(HPSA_DRIVER_VERSION);
83MODULE_LICENSE("GPL");
84MODULE_ALIAS("cciss");
85
86static int hpsa_simple_mode;
87module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
88MODULE_PARM_DESC(hpsa_simple_mode,
89 "Use 'simple mode' rather than 'performant mode'");
90
91/* define the PCI info for the cards we can control */
92static const struct pci_device_id hpsa_pci_device_id[] = {
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
135 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
141 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
142 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
145 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
146 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
147 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
148 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
149 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
150 {0,}
151};
152
153MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
154
155/* board_id = Subsystem Device ID & Vendor ID
156 * product = Marketing Name for the board
157 * access = Address of the struct of function pointers
158 */
159static struct board_type products[] = {
160 {0x40700E11, "Smart Array 5300", &SA5A_access},
161 {0x40800E11, "Smart Array 5i", &SA5B_access},
162 {0x40820E11, "Smart Array 532", &SA5B_access},
163 {0x40830E11, "Smart Array 5312", &SA5B_access},
164 {0x409A0E11, "Smart Array 641", &SA5A_access},
165 {0x409B0E11, "Smart Array 642", &SA5A_access},
166 {0x409C0E11, "Smart Array 6400", &SA5A_access},
167 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
168 {0x40910E11, "Smart Array 6i", &SA5A_access},
169 {0x3225103C, "Smart Array P600", &SA5A_access},
170 {0x3223103C, "Smart Array P800", &SA5A_access},
171 {0x3234103C, "Smart Array P400", &SA5A_access},
172 {0x3235103C, "Smart Array P400i", &SA5A_access},
173 {0x3211103C, "Smart Array E200i", &SA5A_access},
174 {0x3212103C, "Smart Array E200", &SA5A_access},
175 {0x3213103C, "Smart Array E200i", &SA5A_access},
176 {0x3214103C, "Smart Array E200i", &SA5A_access},
177 {0x3215103C, "Smart Array E200i", &SA5A_access},
178 {0x3237103C, "Smart Array E500", &SA5A_access},
179 {0x323D103C, "Smart Array P700m", &SA5A_access},
180 {0x3241103C, "Smart Array P212", &SA5_access},
181 {0x3243103C, "Smart Array P410", &SA5_access},
182 {0x3245103C, "Smart Array P410i", &SA5_access},
183 {0x3247103C, "Smart Array P411", &SA5_access},
184 {0x3249103C, "Smart Array P812", &SA5_access},
185 {0x324A103C, "Smart Array P712m", &SA5_access},
186 {0x324B103C, "Smart Array P711m", &SA5_access},
187 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
188 {0x3350103C, "Smart Array P222", &SA5_access},
189 {0x3351103C, "Smart Array P420", &SA5_access},
190 {0x3352103C, "Smart Array P421", &SA5_access},
191 {0x3353103C, "Smart Array P822", &SA5_access},
192 {0x3354103C, "Smart Array P420i", &SA5_access},
193 {0x3355103C, "Smart Array P220i", &SA5_access},
194 {0x3356103C, "Smart Array P721m", &SA5_access},
195 {0x1920103C, "Smart Array P430i", &SA5_access},
196 {0x1921103C, "Smart Array P830i", &SA5_access},
197 {0x1922103C, "Smart Array P430", &SA5_access},
198 {0x1923103C, "Smart Array P431", &SA5_access},
199 {0x1924103C, "Smart Array P830", &SA5_access},
200 {0x1925103C, "Smart Array P831", &SA5_access},
201 {0x1926103C, "Smart Array P731m", &SA5_access},
202 {0x1928103C, "Smart Array P230i", &SA5_access},
203 {0x1929103C, "Smart Array P530", &SA5_access},
204 {0x21BD103C, "Smart Array P244br", &SA5_access},
205 {0x21BE103C, "Smart Array P741m", &SA5_access},
206 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
207 {0x21C0103C, "Smart Array P440ar", &SA5_access},
208 {0x21C1103C, "Smart Array P840ar", &SA5_access},
209 {0x21C2103C, "Smart Array P440", &SA5_access},
210 {0x21C3103C, "Smart Array P441", &SA5_access},
211 {0x21C4103C, "Smart Array", &SA5_access},
212 {0x21C5103C, "Smart Array P841", &SA5_access},
213 {0x21C6103C, "Smart HBA H244br", &SA5_access},
214 {0x21C7103C, "Smart HBA H240", &SA5_access},
215 {0x21C8103C, "Smart HBA H241", &SA5_access},
216 {0x21C9103C, "Smart Array", &SA5_access},
217 {0x21CA103C, "Smart Array P246br", &SA5_access},
218 {0x21CB103C, "Smart Array P840", &SA5_access},
219 {0x21CC103C, "Smart Array", &SA5_access},
220 {0x21CD103C, "Smart Array", &SA5_access},
221 {0x21CE103C, "Smart HBA", &SA5_access},
222 {0x05809005, "SmartHBA-SA", &SA5_access},
223 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
224 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
225 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
226 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
227 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
228 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
229 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
230 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
231 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
232 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
233 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
234};
235
236static struct scsi_transport_template *hpsa_sas_transport_template;
237static int hpsa_add_sas_host(struct ctlr_info *h);
238static void hpsa_delete_sas_host(struct ctlr_info *h);
239static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
240 struct hpsa_scsi_dev_t *device);
241static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
242static struct hpsa_scsi_dev_t
243 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
244 struct sas_rphy *rphy);
245
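/*
 * Sentinel scsi_cmnd addresses: the driver stores one of these in
 * c->scsi_cmd to mark a command as busy or idle (see hpsa_is_cmd_idle())
 * without pointing at a real mid-layer command.
 */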
246#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
247static const struct scsi_cmnd hpsa_cmd_busy;
248#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
249static const struct scsi_cmnd hpsa_cmd_idle;
250static int number_of_controllers;
251
252static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
253static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
254static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
255 void __user *arg);
256
257#ifdef CONFIG_COMPAT
258static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
259 void __user *arg);
260#endif
261
262static void cmd_free(struct ctlr_info *h, struct CommandList *c);
263static struct CommandList *cmd_alloc(struct ctlr_info *h);
264static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
265static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
266 struct scsi_cmnd *scmd);
267static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
268 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
269 int cmd_type);
270static void hpsa_free_cmd_pool(struct ctlr_info *h);
271#define VPD_PAGE (1 << 8)
272#define HPSA_SIMPLE_ERROR_BITS 0x03
273
274static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
275static void hpsa_scan_start(struct Scsi_Host *);
276static int hpsa_scan_finished(struct Scsi_Host *sh,
277 unsigned long elapsed_time);
278static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
279
280static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
281static int hpsa_slave_alloc(struct scsi_device *sdev);
282static int hpsa_slave_configure(struct scsi_device *sdev);
283static void hpsa_slave_destroy(struct scsi_device *sdev);
284
285static void hpsa_update_scsi_devices(struct ctlr_info *h);
286static int check_for_unit_attention(struct ctlr_info *h,
287 struct CommandList *c);
288static void check_ioctl_unit_attention(struct ctlr_info *h,
289 struct CommandList *c);
290/* performant mode helper functions */
291static void calc_bucket_map(int *bucket, int num_buckets,
292 int nsgs, int min_blocks, u32 *bucket_map);
293static void hpsa_free_performant_mode(struct ctlr_info *h);
294static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
295static inline u32 next_command(struct ctlr_info *h, u8 q);
296static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
297 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
298 u64 *cfg_offset);
299static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
300 unsigned long *memory_bar);
301static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
302 bool *legacy_board);
303static int wait_for_device_to_become_ready(struct ctlr_info *h,
304 unsigned char lunaddr[],
305 int reply_queue);
306static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
307 int wait_for_ready);
308static inline void finish_cmd(struct CommandList *c);
309static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
310#define BOARD_NOT_READY 0
311#define BOARD_READY 1
312static void hpsa_drain_accel_commands(struct ctlr_info *h);
313static void hpsa_flush_cache(struct ctlr_info *h);
314static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
315 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
316 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
317static void hpsa_command_resubmit_worker(struct work_struct *work);
318static u32 lockup_detected(struct ctlr_info *h);
319static int detect_controller_lockup(struct ctlr_info *h);
320static void hpsa_disable_rld_caching(struct ctlr_info *h);
321static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
322 struct ReportExtendedLUNdata *buf, int bufsize);
323static bool hpsa_vpd_page_supported(struct ctlr_info *h,
324 unsigned char scsi3addr[], u8 page);
325static int hpsa_luns_changed(struct ctlr_info *h);
326static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
327 struct hpsa_scsi_dev_t *dev,
328 unsigned char *scsi3addr);
329
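/*
 * The Scsi_Host private area (shost_priv()) holds a single pointer back to
 * the owning ctlr_info; these helpers recover it from a scsi_device or a
 * Scsi_Host.
 */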
330static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
331{
332 unsigned long *priv = shost_priv(sdev->host);
333 return (struct ctlr_info *) *priv;
334}
335
336static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
337{
338 unsigned long *priv = shost_priv(sh);
339 return (struct ctlr_info *) *priv;
340}
341
342static inline bool hpsa_is_cmd_idle(struct CommandList *c)
343{
344 return c->scsi_cmd == SCSI_CMD_IDLE;
345}
346
347static inline bool hpsa_is_pending_event(struct CommandList *c)
348{
349 return c->reset_pending;
350}
351
352/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
353static void decode_sense_data(const u8 *sense_data, int sense_data_len,
354 u8 *sense_key, u8 *asc, u8 *ascq)
355{
356 struct scsi_sense_hdr sshdr;
357 bool rc;
358
359 *sense_key = -1;
360 *asc = -1;
361 *ascq = -1;
362
363 if (sense_data_len < 1)
364 return;
365
366 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
367 if (rc) {
368 *sense_key = sshdr.sense_key;
369 *asc = sshdr.asc;
370 *ascq = sshdr.ascq;
371 }
372}
373
374static int check_for_unit_attention(struct ctlr_info *h,
375 struct CommandList *c)
376{
377 u8 sense_key, asc, ascq;
378 int sense_len;
379
380 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
381 sense_len = sizeof(c->err_info->SenseInfo);
382 else
383 sense_len = c->err_info->SenseLen;
384
385 decode_sense_data(c->err_info->SenseInfo, sense_len,
386 &sense_key, &asc, &ascq);
387 if (sense_key != UNIT_ATTENTION || asc == 0xff)
388 return 0;
389
390 switch (asc) {
391 case STATE_CHANGED:
392 dev_warn(&h->pdev->dev,
393 "%s: a state change detected, command retried\n",
394 h->devname);
395 break;
396 case LUN_FAILED:
397 dev_warn(&h->pdev->dev,
398 "%s: LUN failure detected\n", h->devname);
399 break;
400 case REPORT_LUNS_CHANGED:
401 dev_warn(&h->pdev->dev,
402 "%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on external
	 * target (array) devices.
	 */
407 break;
408 case POWER_OR_RESET:
409 dev_warn(&h->pdev->dev,
410 "%s: a power on or device reset detected\n",
411 h->devname);
412 break;
413 case UNIT_ATTENTION_CLEARED:
414 dev_warn(&h->pdev->dev,
415 "%s: unit attention cleared by another initiator\n",
416 h->devname);
417 break;
418 default:
419 dev_warn(&h->pdev->dev,
420 "%s: unknown unit attention detected\n",
421 h->devname);
422 break;
423 }
424 return 1;
425}
426
427static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
428{
429 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
430 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
431 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
432 return 0;
433 dev_warn(&h->pdev->dev, HPSA "device busy");
434 return 1;
435}
436
437static u32 lockup_detected(struct ctlr_info *h);
438static ssize_t host_show_lockup_detected(struct device *dev,
439 struct device_attribute *attr, char *buf)
440{
441 int ld;
442 struct ctlr_info *h;
443 struct Scsi_Host *shost = class_to_shost(dev);
444
445 h = shost_to_hba(shost);
446 ld = lockup_detected(h);
447
448 return sprintf(buf, "ld=%d\n", ld);
449}
450
451static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
452 struct device_attribute *attr,
453 const char *buf, size_t count)
454{
455 int status, len;
456 struct ctlr_info *h;
457 struct Scsi_Host *shost = class_to_shost(dev);
458 char tmpbuf[10];
459
460 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
461 return -EACCES;
462 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
463 strncpy(tmpbuf, buf, len);
464 tmpbuf[len] = '\0';
465 if (sscanf(tmpbuf, "%d", &status) != 1)
466 return -EINVAL;
467 h = shost_to_hba(shost);
468 h->acciopath_status = !!status;
469 dev_warn(&h->pdev->dev,
470 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
471 h->acciopath_status ? "enabled" : "disabled");
472 return count;
473}
474
475static ssize_t host_store_raid_offload_debug(struct device *dev,
476 struct device_attribute *attr,
477 const char *buf, size_t count)
478{
479 int debug_level, len;
480 struct ctlr_info *h;
481 struct Scsi_Host *shost = class_to_shost(dev);
482 char tmpbuf[10];
483
484 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
485 return -EACCES;
486 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
487 strncpy(tmpbuf, buf, len);
488 tmpbuf[len] = '\0';
489 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
490 return -EINVAL;
491 if (debug_level < 0)
492 debug_level = 0;
493 h = shost_to_hba(shost);
494 h->raid_offload_debug = debug_level;
495 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
496 h->raid_offload_debug);
497 return count;
498}
499
500static ssize_t host_store_rescan(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t count)
503{
504 struct ctlr_info *h;
505 struct Scsi_Host *shost = class_to_shost(dev);
506 h = shost_to_hba(shost);
507 hpsa_scan_start(h->scsi_host);
508 return count;
509}
510
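/*
 * Bytes 32-35 of the cached standard INQUIRY data carry the 4-character
 * product (firmware) revision reported below.
 */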
511static ssize_t host_show_firmware_revision(struct device *dev,
512 struct device_attribute *attr, char *buf)
513{
514 struct ctlr_info *h;
515 struct Scsi_Host *shost = class_to_shost(dev);
516 unsigned char *fwrev;
517
518 h = shost_to_hba(shost);
519 if (!h->hba_inquiry_data)
520 return 0;
521 fwrev = &h->hba_inquiry_data[32];
522 return snprintf(buf, 20, "%c%c%c%c\n",
523 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
524}
525
526static ssize_t host_show_commands_outstanding(struct device *dev,
527 struct device_attribute *attr, char *buf)
528{
529 struct Scsi_Host *shost = class_to_shost(dev);
530 struct ctlr_info *h = shost_to_hba(shost);
531
532 return snprintf(buf, 20, "%d\n",
533 atomic_read(&h->commands_outstanding));
534}
535
536static ssize_t host_show_transport_mode(struct device *dev,
537 struct device_attribute *attr, char *buf)
538{
539 struct ctlr_info *h;
540 struct Scsi_Host *shost = class_to_shost(dev);
541
542 h = shost_to_hba(shost);
543 return snprintf(buf, 20, "%s\n",
544 h->transMethod & CFGTBL_Trans_Performant ?
545 "performant" : "simple");
546}
547
548static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
549 struct device_attribute *attr, char *buf)
550{
551 struct ctlr_info *h;
552 struct Scsi_Host *shost = class_to_shost(dev);
553
554 h = shost_to_hba(shost);
555 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
556 (h->acciopath_status == 1) ? "enabled" : "disabled");
557}
558
559/* List of controllers which cannot be hard reset on kexec with reset_devices */
560static u32 unresettable_controller[] = {
561 0x324a103C, /* Smart Array P712m */
562 0x324b103C, /* Smart Array P711m */
563 0x3223103C, /* Smart Array P800 */
564 0x3234103C, /* Smart Array P400 */
565 0x3235103C, /* Smart Array P400i */
566 0x3211103C, /* Smart Array E200i */
567 0x3212103C, /* Smart Array E200 */
568 0x3213103C, /* Smart Array E200i */
569 0x3214103C, /* Smart Array E200i */
570 0x3215103C, /* Smart Array E200i */
571 0x3237103C, /* Smart Array E500 */
572 0x323D103C, /* Smart Array P700m */
573 0x40800E11, /* Smart Array 5i */
574 0x409C0E11, /* Smart Array 6400 */
575 0x409D0E11, /* Smart Array 6400 EM */
576 0x40700E11, /* Smart Array 5300 */
577 0x40820E11, /* Smart Array 532 */
578 0x40830E11, /* Smart Array 5312 */
579 0x409A0E11, /* Smart Array 641 */
580 0x409B0E11, /* Smart Array 642 */
581 0x40910E11, /* Smart Array 6i */
582};
583
584/* List of controllers which cannot even be soft reset */
585static u32 soft_unresettable_controller[] = {
586 0x40800E11, /* Smart Array 5i */
587 0x40700E11, /* Smart Array 5300 */
588 0x40820E11, /* Smart Array 532 */
589 0x40830E11, /* Smart Array 5312 */
590 0x409A0E11, /* Smart Array 641 */
591 0x409B0E11, /* Smart Array 642 */
592 0x40910E11, /* Smart Array 6i */
593 /* Exclude 640x boards. These are two pci devices in one slot
594 * which share a battery backed cache module. One controls the
595 * cache, the other accesses the cache through the one that controls
596 * it. If we reset the one controlling the cache, the other will
597 * likely not be happy. Just forbid resetting this conjoined mess.
598 * The 640x isn't really supported by hpsa anyway.
599 */
600 0x409C0E11, /* Smart Array 6400 */
601 0x409D0E11, /* Smart Array 6400 EM */
602};
603
604static int board_id_in_array(u32 a[], int nelems, u32 board_id)
605{
606 int i;
607
608 for (i = 0; i < nelems; i++)
609 if (a[i] == board_id)
610 return 1;
611 return 0;
612}
613
614static int ctlr_is_hard_resettable(u32 board_id)
615{
616 return !board_id_in_array(unresettable_controller,
617 ARRAY_SIZE(unresettable_controller), board_id);
618}
619
620static int ctlr_is_soft_resettable(u32 board_id)
621{
622 return !board_id_in_array(soft_unresettable_controller,
623 ARRAY_SIZE(soft_unresettable_controller), board_id);
624}
625
626static int ctlr_is_resettable(u32 board_id)
627{
628 return ctlr_is_hard_resettable(board_id) ||
629 ctlr_is_soft_resettable(board_id);
630}
631
632static ssize_t host_show_resettable(struct device *dev,
633 struct device_attribute *attr, char *buf)
634{
635 struct ctlr_info *h;
636 struct Scsi_Host *shost = class_to_shost(dev);
637
638 h = shost_to_hba(shost);
639 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
640}
641
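/*
 * In the 8-byte CISS LUN address, the top two bits of byte 3 encode the
 * address mode; 0x40 (binary 01) marks logical (volume) addressing.
 */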
642static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
643{
644 return (scsi3addr[3] & 0xC0) == 0x40;
645}
646
647static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
648 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
649};
650#define HPSA_RAID_0 0
651#define HPSA_RAID_4 1
652#define HPSA_RAID_1 2 /* also used for RAID 10 */
653#define HPSA_RAID_5 3 /* also used for RAID 50 */
654#define HPSA_RAID_51 4
655#define HPSA_RAID_6 5 /* also used for RAID 60 */
656#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
657#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
658#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
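/* RAID_UNKNOWN and PHYSICAL_DRIVE index the last two raid_label[] entries. */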
659
660static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
661{
662 return !device->physical_device;
663}
664
665static ssize_t raid_level_show(struct device *dev,
666 struct device_attribute *attr, char *buf)
667{
668 ssize_t l = 0;
669 unsigned char rlevel;
670 struct ctlr_info *h;
671 struct scsi_device *sdev;
672 struct hpsa_scsi_dev_t *hdev;
673 unsigned long flags;
674
675 sdev = to_scsi_device(dev);
676 h = sdev_to_hba(sdev);
677 spin_lock_irqsave(&h->lock, flags);
678 hdev = sdev->hostdata;
679 if (!hdev) {
680 spin_unlock_irqrestore(&h->lock, flags);
681 return -ENODEV;
682 }
683
684 /* Is this even a logical drive? */
685 if (!is_logical_device(hdev)) {
686 spin_unlock_irqrestore(&h->lock, flags);
687 l = snprintf(buf, PAGE_SIZE, "N/A\n");
688 return l;
689 }
690
691 rlevel = hdev->raid_level;
692 spin_unlock_irqrestore(&h->lock, flags);
693 if (rlevel > RAID_UNKNOWN)
694 rlevel = RAID_UNKNOWN;
695 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
696 return l;
697}
698
699static ssize_t lunid_show(struct device *dev,
700 struct device_attribute *attr, char *buf)
701{
702 struct ctlr_info *h;
703 struct scsi_device *sdev;
704 struct hpsa_scsi_dev_t *hdev;
705 unsigned long flags;
706 unsigned char lunid[8];
707
708 sdev = to_scsi_device(dev);
709 h = sdev_to_hba(sdev);
710 spin_lock_irqsave(&h->lock, flags);
711 hdev = sdev->hostdata;
712 if (!hdev) {
713 spin_unlock_irqrestore(&h->lock, flags);
714 return -ENODEV;
715 }
716 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
717 spin_unlock_irqrestore(&h->lock, flags);
718 return snprintf(buf, 20, "0x%8phN\n", lunid);
719}
720
721static ssize_t unique_id_show(struct device *dev,
722 struct device_attribute *attr, char *buf)
723{
724 struct ctlr_info *h;
725 struct scsi_device *sdev;
726 struct hpsa_scsi_dev_t *hdev;
727 unsigned long flags;
728 unsigned char sn[16];
729
730 sdev = to_scsi_device(dev);
731 h = sdev_to_hba(sdev);
732 spin_lock_irqsave(&h->lock, flags);
733 hdev = sdev->hostdata;
734 if (!hdev) {
735 spin_unlock_irqrestore(&h->lock, flags);
736 return -ENODEV;
737 }
738 memcpy(sn, hdev->device_id, sizeof(sn));
739 spin_unlock_irqrestore(&h->lock, flags);
740 return snprintf(buf, 16 * 2 + 2,
741 "%02X%02X%02X%02X%02X%02X%02X%02X"
742 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
743 sn[0], sn[1], sn[2], sn[3],
744 sn[4], sn[5], sn[6], sn[7],
745 sn[8], sn[9], sn[10], sn[11],
746 sn[12], sn[13], sn[14], sn[15]);
747}
748
749static ssize_t sas_address_show(struct device *dev,
750 struct device_attribute *attr, char *buf)
751{
752 struct ctlr_info *h;
753 struct scsi_device *sdev;
754 struct hpsa_scsi_dev_t *hdev;
755 unsigned long flags;
756 u64 sas_address;
757
758 sdev = to_scsi_device(dev);
759 h = sdev_to_hba(sdev);
760 spin_lock_irqsave(&h->lock, flags);
761 hdev = sdev->hostdata;
762 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
763 spin_unlock_irqrestore(&h->lock, flags);
764 return -ENODEV;
765 }
766 sas_address = hdev->sas_address;
767 spin_unlock_irqrestore(&h->lock, flags);
768
769 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
770}
771
772static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
773 struct device_attribute *attr, char *buf)
774{
775 struct ctlr_info *h;
776 struct scsi_device *sdev;
777 struct hpsa_scsi_dev_t *hdev;
778 unsigned long flags;
779 int offload_enabled;
780
781 sdev = to_scsi_device(dev);
782 h = sdev_to_hba(sdev);
783 spin_lock_irqsave(&h->lock, flags);
784 hdev = sdev->hostdata;
785 if (!hdev) {
786 spin_unlock_irqrestore(&h->lock, flags);
787 return -ENODEV;
788 }
789 offload_enabled = hdev->offload_enabled;
790 spin_unlock_irqrestore(&h->lock, flags);
791
792 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
793 return snprintf(buf, 20, "%d\n", offload_enabled);
794 else
795 return snprintf(buf, 40, "%s\n",
796 "Not applicable for a controller");
797}
798
799#define MAX_PATHS 8
800static ssize_t path_info_show(struct device *dev,
801 struct device_attribute *attr, char *buf)
802{
803 struct ctlr_info *h;
804 struct scsi_device *sdev;
805 struct hpsa_scsi_dev_t *hdev;
806 unsigned long flags;
807 int i;
808 int output_len = 0;
809 u8 box;
810 u8 bay;
811 u8 path_map_index = 0;
812 char *active;
813 unsigned char phys_connector[2];
814
815 sdev = to_scsi_device(dev);
816 h = sdev_to_hba(sdev);
817 spin_lock_irqsave(&h->devlock, flags);
818 hdev = sdev->hostdata;
819 if (!hdev) {
820 spin_unlock_irqrestore(&h->devlock, flags);
821 return -ENODEV;
822 }
823
824 bay = hdev->bay;
825 for (i = 0; i < MAX_PATHS; i++) {
826 path_map_index = 1<<i;
827 if (i == hdev->active_path_index)
828 active = "Active";
829 else if (hdev->path_map & path_map_index)
830 active = "Inactive";
831 else
832 continue;
833
834 output_len += scnprintf(buf + output_len,
835 PAGE_SIZE - output_len,
836 "[%d:%d:%d:%d] %20.20s ",
837 h->scsi_host->host_no,
838 hdev->bus, hdev->target, hdev->lun,
839 scsi_device_type(hdev->devtype));
840
841 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
842 output_len += scnprintf(buf + output_len,
843 PAGE_SIZE - output_len,
844 "%s\n", active);
845 continue;
846 }
847
848 box = hdev->box[i];
849 memcpy(&phys_connector, &hdev->phys_connector[i],
850 sizeof(phys_connector));
851 if (phys_connector[0] < '0')
852 phys_connector[0] = '0';
853 if (phys_connector[1] < '0')
854 phys_connector[1] = '0';
855 output_len += scnprintf(buf + output_len,
856 PAGE_SIZE - output_len,
857 "PORT: %.2s ",
858 phys_connector);
859 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
860 hdev->expose_device) {
861 if (box == 0 || box == 0xFF) {
862 output_len += scnprintf(buf + output_len,
863 PAGE_SIZE - output_len,
864 "BAY: %hhu %s\n",
865 bay, active);
866 } else {
867 output_len += scnprintf(buf + output_len,
868 PAGE_SIZE - output_len,
869 "BOX: %hhu BAY: %hhu %s\n",
870 box, bay, active);
871 }
872 } else if (box != 0 && box != 0xFF) {
873 output_len += scnprintf(buf + output_len,
874 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
875 box, active);
876 } else
877 output_len += scnprintf(buf + output_len,
878 PAGE_SIZE - output_len, "%s\n", active);
879 }
880
881 spin_unlock_irqrestore(&h->devlock, flags);
882 return output_len;
883}
884
885static ssize_t host_show_ctlr_num(struct device *dev,
886 struct device_attribute *attr, char *buf)
887{
888 struct ctlr_info *h;
889 struct Scsi_Host *shost = class_to_shost(dev);
890
891 h = shost_to_hba(shost);
892 return snprintf(buf, 20, "%d\n", h->ctlr);
893}
894
895static ssize_t host_show_legacy_board(struct device *dev,
896 struct device_attribute *attr, char *buf)
897{
898 struct ctlr_info *h;
899 struct Scsi_Host *shost = class_to_shost(dev);
900
901 h = shost_to_hba(shost);
902 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
903}
904
905static DEVICE_ATTR_RO(raid_level);
906static DEVICE_ATTR_RO(lunid);
907static DEVICE_ATTR_RO(unique_id);
908static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
909static DEVICE_ATTR_RO(sas_address);
910static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
911 host_show_hp_ssd_smart_path_enabled, NULL);
912static DEVICE_ATTR_RO(path_info);
913static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
914 host_show_hp_ssd_smart_path_status,
915 host_store_hp_ssd_smart_path_status);
916static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
917 host_store_raid_offload_debug);
918static DEVICE_ATTR(firmware_revision, S_IRUGO,
919 host_show_firmware_revision, NULL);
920static DEVICE_ATTR(commands_outstanding, S_IRUGO,
921 host_show_commands_outstanding, NULL);
922static DEVICE_ATTR(transport_mode, S_IRUGO,
923 host_show_transport_mode, NULL);
924static DEVICE_ATTR(resettable, S_IRUGO,
925 host_show_resettable, NULL);
926static DEVICE_ATTR(lockup_detected, S_IRUGO,
927 host_show_lockup_detected, NULL);
928static DEVICE_ATTR(ctlr_num, S_IRUGO,
929 host_show_ctlr_num, NULL);
930static DEVICE_ATTR(legacy_board, S_IRUGO,
931 host_show_legacy_board, NULL);
932
933static struct device_attribute *hpsa_sdev_attrs[] = {
934 &dev_attr_raid_level,
935 &dev_attr_lunid,
936 &dev_attr_unique_id,
937 &dev_attr_hp_ssd_smart_path_enabled,
938 &dev_attr_path_info,
939 &dev_attr_sas_address,
940 NULL,
941};
942
943static struct device_attribute *hpsa_shost_attrs[] = {
944 &dev_attr_rescan,
945 &dev_attr_firmware_revision,
946 &dev_attr_commands_outstanding,
947 &dev_attr_transport_mode,
948 &dev_attr_resettable,
949 &dev_attr_hp_ssd_smart_path_status,
950 &dev_attr_raid_offload_debug,
951 &dev_attr_lockup_detected,
952 &dev_attr_ctlr_num,
953 &dev_attr_legacy_board,
954 NULL,
955};
956
957#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
958 HPSA_MAX_CONCURRENT_PASSTHRUS)
959
960static struct scsi_host_template hpsa_driver_template = {
961 .module = THIS_MODULE,
962 .name = HPSA,
963 .proc_name = HPSA,
964 .queuecommand = hpsa_scsi_queue_command,
965 .scan_start = hpsa_scan_start,
966 .scan_finished = hpsa_scan_finished,
967 .change_queue_depth = hpsa_change_queue_depth,
968 .this_id = -1,
969 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
970 .ioctl = hpsa_ioctl,
971 .slave_alloc = hpsa_slave_alloc,
972 .slave_configure = hpsa_slave_configure,
973 .slave_destroy = hpsa_slave_destroy,
974#ifdef CONFIG_COMPAT
975 .compat_ioctl = hpsa_compat_ioctl,
976#endif
977 .sdev_attrs = hpsa_sdev_attrs,
978 .shost_attrs = hpsa_shost_attrs,
979 .max_sectors = 2048,
980 .no_write_same = 1,
981};
982
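/*
 * Performant-mode completions are consumed from a reply ring: the low bit
 * of each posted entry toggles on every pass through the ring, so an entry
 * whose low bit matches rq->wraparound is a fresh completion; otherwise the
 * queue is empty.
 */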
983static inline u32 next_command(struct ctlr_info *h, u8 q)
984{
985 u32 a;
986 struct reply_queue_buffer *rq = &h->reply_queue[q];
987
988 if (h->transMethod & CFGTBL_Trans_io_accel1)
989 return h->access.command_completed(h, q);
990
991 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
992 return h->access.command_completed(h, q);
993
994 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
995 a = rq->head[rq->current_entry];
996 rq->current_entry++;
997 atomic_dec(&h->commands_outstanding);
998 } else {
999 a = FIFO_EMPTY;
1000 }
1001 /* Check for wraparound */
1002 if (rq->current_entry == h->max_commands) {
1003 rq->current_entry = 0;
1004 rq->wraparound ^= 1;
1005 }
1006 return a;
1007}
1008
1009/*
1010 * There are some special bits in the bus address of the
1011 * command that we have to set for the controller to know
1012 * how to process the command:
1013 *
1014 * Normal performant mode:
1015 * bit 0: 1 means performant mode, 0 means simple mode.
1016 * bits 1-3 = block fetch table entry
1017 * bits 4-6 = command type (== 0)
1018 *
1019 * ioaccel1 mode:
1020 * bit 0 = "performant mode" bit.
1021 * bits 1-3 = block fetch table entry
1022 * bits 4-6 = command type (== 110)
1023 * (command type is needed because ioaccel1 mode
1024 * commands are submitted through the same register as normal
1025 * mode commands, so this is how the controller knows whether
1026 * the command is normal mode or ioaccel1 mode.)
1027 *
1028 * ioaccel2 mode:
1029 * bit 0 = "performant mode" bit.
1030 * bits 1-4 = block fetch table entry (note extra bit)
1031 * bits 4-6 = not needed, because ioaccel2 mode has
1032 * a separate special register for submitting commands.
1033 */
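/*
 * Illustrative example (not driver code): for a normal performant-mode
 * command with SGList == 3, the tag is built roughly as
 *	c->busaddr |= 1 | (h->blockFetchTable[3] << 1);
 * i.e. bit 0 set for performant mode and the block fetch entry in bits 1-3.
 */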
1034
/*
 * set_performant_mode: Modify the tag for cciss performant mode.
 * Set bit 0 for pull model, bits 3-1 for the block fetch
 * register number.
 */
1040#define DEFAULT_REPLY_QUEUE (-1)
1041static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1042 int reply_queue)
1043{
1044 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1045 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1046 if (unlikely(!h->msix_vectors))
1047 return;
1048 c->Header.ReplyQueue = reply_queue;
1049 }
1050}
1051
1052static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1053 struct CommandList *c,
1054 int reply_queue)
1055{
1056 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1057
1058 /*
1059 * Tell the controller to post the reply to the queue for this
1060 * processor. This seems to give the best I/O throughput.
1061 */
1062 cp->ReplyQueue = reply_queue;
1063 /*
1064 * Set the bits in the address sent down to include:
1065 * - performant mode bit (bit 0)
1066 * - pull count (bits 1-3)
1067 * - command type (bits 4-6)
1068 */
1069 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1070 IOACCEL1_BUSADDR_CMDTYPE;
1071}
1072
1073static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1074 struct CommandList *c,
1075 int reply_queue)
1076{
1077 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1078 &h->ioaccel2_cmd_pool[c->cmdindex];
1079
1080 /* Tell the controller to post the reply to the queue for this
1081 * processor. This seems to give the best I/O throughput.
1082 */
1083 cp->reply_queue = reply_queue;
1084 /* Set the bits in the address sent down to include:
1085 * - performant mode bit not used in ioaccel mode 2
1086 * - pull count (bits 0-3)
1087 * - command type isn't needed for ioaccel2
1088 */
1089 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1090}
1091
1092static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1093 struct CommandList *c,
1094 int reply_queue)
1095{
1096 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1097
1098 /*
1099 * Tell the controller to post the reply to the queue for this
1100 * processor. This seems to give the best I/O throughput.
1101 */
1102 cp->reply_queue = reply_queue;
1103 /*
1104 * Set the bits in the address sent down to include:
1105 * - performant mode bit not used in ioaccel mode 2
1106 * - pull count (bits 0-3)
1107 * - command type isn't needed for ioaccel2
1108 */
1109 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1110}
1111
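/*
 * A firmware flash shows up as a BMIC WRITE whose subcommand byte (CDB[6])
 * selects the firmware-flash operation.
 */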
1112static int is_firmware_flash_cmd(u8 *cdb)
1113{
1114 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1115}
1116
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should. So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
1122#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1123#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1124#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1125static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1126 struct CommandList *c)
1127{
1128 if (!is_firmware_flash_cmd(c->Request.CDB))
1129 return;
1130 atomic_inc(&h->firmware_flash_in_progress);
1131 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1132}
1133
1134static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1135 struct CommandList *c)
1136{
1137 if (is_firmware_flash_cmd(c->Request.CDB) &&
1138 atomic_dec_and_test(&h->firmware_flash_in_progress))
1139 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1140}
1141
1142static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1143 struct CommandList *c, int reply_queue)
1144{
1145 dial_down_lockup_detection_during_fw_flash(h, c);
1146 atomic_inc(&h->commands_outstanding);
1147
1148 reply_queue = h->reply_map[raw_smp_processor_id()];
1149 switch (c->cmd_type) {
1150 case CMD_IOACCEL1:
1151 set_ioaccel1_performant_mode(h, c, reply_queue);
1152 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1153 break;
1154 case CMD_IOACCEL2:
1155 set_ioaccel2_performant_mode(h, c, reply_queue);
1156 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1157 break;
1158 case IOACCEL2_TMF:
1159 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1160 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1161 break;
1162 default:
1163 set_performant_mode(h, c, reply_queue);
1164 h->access.submit_command(h, c);
1165 }
1166}
1167
1168static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1169{
1170 if (unlikely(hpsa_is_pending_event(c)))
1171 return finish_cmd(c);
1172
1173 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1174}
1175
1176static inline int is_hba_lunid(unsigned char scsi3addr[])
1177{
1178 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1179}
1180
1181static inline int is_scsi_rev_5(struct ctlr_info *h)
1182{
1183 if (!h->hba_inquiry_data)
1184 return 0;
1185 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1186 return 1;
1187 return 0;
1188}
1189
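/*
 * Returns 0 and fills in *target and *lun when a free slot exists on the
 * given bus; returns nonzero when every target on that bus is already taken.
 */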
1190static int hpsa_find_target_lun(struct ctlr_info *h,
1191 unsigned char scsi3addr[], int bus, int *target, int *lun)
1192{
	/* finds an unused bus, target, lun for a new physical device;
	 * assumes h->devlock is held
	 */
1196 int i, found = 0;
1197 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1198
1199 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1200
1201 for (i = 0; i < h->ndevices; i++) {
1202 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1203 __set_bit(h->dev[i]->target, lun_taken);
1204 }
1205
1206 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1207 if (i < HPSA_MAX_DEVICES) {
1208 /* *bus = 1; */
1209 *target = i;
1210 *lun = 0;
1211 found = 1;
1212 }
1213 return !found;
1214}
1215
1216static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1217 struct hpsa_scsi_dev_t *dev, char *description)
1218{
1219#define LABEL_SIZE 25
1220 char label[LABEL_SIZE];
1221
1222 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1223 return;
1224
1225 switch (dev->devtype) {
1226 case TYPE_RAID:
1227 snprintf(label, LABEL_SIZE, "controller");
1228 break;
1229 case TYPE_ENCLOSURE:
1230 snprintf(label, LABEL_SIZE, "enclosure");
1231 break;
1232 case TYPE_DISK:
1233 case TYPE_ZBC:
1234 if (dev->external)
1235 snprintf(label, LABEL_SIZE, "external");
1236 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1237 snprintf(label, LABEL_SIZE, "%s",
1238 raid_label[PHYSICAL_DRIVE]);
1239 else
1240 snprintf(label, LABEL_SIZE, "RAID-%s",
1241 dev->raid_level > RAID_UNKNOWN ? "?" :
1242 raid_label[dev->raid_level]);
1243 break;
1244 case TYPE_ROM:
1245 snprintf(label, LABEL_SIZE, "rom");
1246 break;
1247 case TYPE_TAPE:
1248 snprintf(label, LABEL_SIZE, "tape");
1249 break;
1250 case TYPE_MEDIUM_CHANGER:
1251 snprintf(label, LABEL_SIZE, "changer");
1252 break;
1253 default:
1254 snprintf(label, LABEL_SIZE, "UNKNOWN");
1255 break;
1256 }
1257
1258 dev_printk(level, &h->pdev->dev,
1259 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1260 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1261 description,
1262 scsi_device_type(dev->devtype),
1263 dev->vendor,
1264 dev->model,
1265 label,
1266 dev->offload_config ? '+' : '-',
1267 dev->offload_to_be_enabled ? '+' : '-',
1268 dev->expose_device);
1269}
1270
1271/* Add an entry into h->dev[] array. */
1272static int hpsa_scsi_add_entry(struct ctlr_info *h,
1273 struct hpsa_scsi_dev_t *device,
1274 struct hpsa_scsi_dev_t *added[], int *nadded)
1275{
1276 /* assumes h->devlock is held */
1277 int n = h->ndevices;
1278 int i;
1279 unsigned char addr1[8], addr2[8];
1280 struct hpsa_scsi_dev_t *sd;
1281
1282 if (n >= HPSA_MAX_DEVICES) {
1283 dev_err(&h->pdev->dev, "too many devices, some will be "
1284 "inaccessible.\n");
1285 return -1;
1286 }
1287
1288 /* physical devices do not have lun or target assigned until now. */
1289 if (device->lun != -1)
1290 /* Logical device, lun is already assigned. */
1291 goto lun_assigned;
1292
	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit number; zero otherwise.
	 */
1297 if (device->scsi3addr[4] == 0) {
1298 /* This is not a non-zero lun of a multi-lun device */
1299 if (hpsa_find_target_lun(h, device->scsi3addr,
1300 device->bus, &device->target, &device->lun) != 0)
1301 return -1;
1302 goto lun_assigned;
1303 }
1304
	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8-byte LUN address, except for bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
1311 memcpy(addr1, device->scsi3addr, 8);
1312 addr1[4] = 0;
1313 addr1[5] = 0;
1314 for (i = 0; i < n; i++) {
1315 sd = h->dev[i];
1316 memcpy(addr2, sd->scsi3addr, 8);
1317 addr2[4] = 0;
1318 addr2[5] = 0;
1319 /* differ only in byte 4 and 5? */
1320 if (memcmp(addr1, addr2, 8) == 0) {
1321 device->bus = sd->bus;
1322 device->target = sd->target;
1323 device->lun = device->scsi3addr[4];
1324 break;
1325 }
1326 }
1327 if (device->lun == -1) {
1328 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1329 " suspect firmware bug or unsupported hardware "
1330 "configuration.\n");
1331 return -1;
1332 }
1333
1334lun_assigned:
1335
1336 h->dev[n] = device;
1337 h->ndevices++;
1338 added[*nadded] = device;
1339 (*nadded)++;
1340 hpsa_show_dev_msg(KERN_INFO, h, device,
1341 device->expose_device ? "added" : "masked");
1342 return 0;
1343}
1344
1345/*
1346 * Called during a scan operation.
1347 *
1348 * Update an entry in h->dev[] array.
1349 */
1350static void hpsa_scsi_update_entry(struct ctlr_info *h,
1351 int entry, struct hpsa_scsi_dev_t *new_entry)
1352{
1353 /* assumes h->devlock is held */
1354 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1355
1356 /* Raid level changed. */
1357 h->dev[entry]->raid_level = new_entry->raid_level;
1358
	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
1362 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1363
1364 /* Raid offload parameters changed. Careful about the ordering. */
1365 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1366 /*
1367 * if drive is newly offload_enabled, we want to copy the
1368 * raid map data first. If previously offload_enabled and
1369 * offload_config were set, raid map data had better be
1370 * the same as it was before. If raid map data has changed
1371 * then it had better be the case that
1372 * h->dev[entry]->offload_enabled is currently 0.
1373 */
1374 h->dev[entry]->raid_map = new_entry->raid_map;
1375 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1376 }
1377 if (new_entry->offload_to_be_enabled) {
1378 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1379 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1380 }
1381 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1382 h->dev[entry]->offload_config = new_entry->offload_config;
1383 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1384 h->dev[entry]->queue_depth = new_entry->queue_depth;
1385
1386 /*
1387 * We can turn off ioaccel offload now, but need to delay turning
1388 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
1389 * can't do that until all the devices are updated.
1390 */
1391 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1392
1393 /*
1394 * turn ioaccel off immediately if told to do so.
1395 */
1396 if (!new_entry->offload_to_be_enabled)
1397 h->dev[entry]->offload_enabled = 0;
1398
1399 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1400}
1401
1402/* Replace an entry from h->dev[] array. */
1403static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1404 int entry, struct hpsa_scsi_dev_t *new_entry,
1405 struct hpsa_scsi_dev_t *added[], int *nadded,
1406 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1407{
1408 /* assumes h->devlock is held */
1409 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1410 removed[*nremoved] = h->dev[entry];
1411 (*nremoved)++;
1412
1413 /*
1414 * New physical devices won't have target/lun assigned yet
1415 * so we need to preserve the values in the slot we are replacing.
1416 */
1417 if (new_entry->target == -1) {
1418 new_entry->target = h->dev[entry]->target;
1419 new_entry->lun = h->dev[entry]->lun;
1420 }
1421
1422 h->dev[entry] = new_entry;
1423 added[*nadded] = new_entry;
1424 (*nadded)++;
1425
1426 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1427}
1428
1429/* Remove an entry from h->dev[] array. */
1430static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1431 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1432{
1433 /* assumes h->devlock is held */
1434 int i;
1435 struct hpsa_scsi_dev_t *sd;
1436
1437 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1438
1439 sd = h->dev[entry];
1440 removed[*nremoved] = h->dev[entry];
1441 (*nremoved)++;
1442
1443 for (i = entry; i < h->ndevices-1; i++)
1444 h->dev[i] = h->dev[i+1];
1445 h->ndevices--;
1446 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1447}
1448
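/*
 * Byte-wise equality test for two 8-byte CISS LUN addresses; expands inline
 * rather than calling memcmp().
 */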
1449#define SCSI3ADDR_EQ(a, b) ( \
1450 (a)[7] == (b)[7] && \
1451 (a)[6] == (b)[6] && \
1452 (a)[5] == (b)[5] && \
1453 (a)[4] == (b)[4] && \
1454 (a)[3] == (b)[3] && \
1455 (a)[2] == (b)[2] && \
1456 (a)[1] == (b)[1] && \
1457 (a)[0] == (b)[0])
1458
1459static void fixup_botched_add(struct ctlr_info *h,
1460 struct hpsa_scsi_dev_t *added)
1461{
1462 /* called when scsi_add_device fails in order to re-adjust
1463 * h->dev[] to match the mid layer's view.
1464 */
1465 unsigned long flags;
1466 int i, j;
1467
1468 spin_lock_irqsave(&h->lock, flags);
1469 for (i = 0; i < h->ndevices; i++) {
1470 if (h->dev[i] == added) {
1471 for (j = i; j < h->ndevices-1; j++)
1472 h->dev[j] = h->dev[j+1];
1473 h->ndevices--;
1474 break;
1475 }
1476 }
1477 spin_unlock_irqrestore(&h->lock, flags);
1478 kfree(added);
1479}
1480
1481static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1482 struct hpsa_scsi_dev_t *dev2)
1483{
	/* We compare everything except lun and target, as these
	 * are not yet assigned. Compare the parts most likely
	 * to differ first.
	 */
1488 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1489 sizeof(dev1->scsi3addr)) != 0)
1490 return 0;
1491 if (memcmp(dev1->device_id, dev2->device_id,
1492 sizeof(dev1->device_id)) != 0)
1493 return 0;
1494 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1495 return 0;
1496 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1497 return 0;
1498 if (dev1->devtype != dev2->devtype)
1499 return 0;
1500 if (dev1->bus != dev2->bus)
1501 return 0;
1502 return 1;
1503}
1504
1505static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1506 struct hpsa_scsi_dev_t *dev2)
1507{
1508 /* Device attributes that can change, but don't mean
1509 * that the device is a different device, nor that the OS
1510 * needs to be told anything about the change.
1511 */
1512 if (dev1->raid_level != dev2->raid_level)
1513 return 1;
1514 if (dev1->offload_config != dev2->offload_config)
1515 return 1;
1516 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1517 return 1;
1518 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1519 if (dev1->queue_depth != dev2->queue_depth)
1520 return 1;
1521 /*
1522 * This can happen for dual domain devices. An active
1523 * path change causes the ioaccel handle to change
1524 *
1525 * for example note the handle differences between p0 and p1
1526 * Device WWN ,WWN hash,Handle
1527 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1528 * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
1529 */
1530 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1531 return 1;
1532 return 0;
1533}
1534
1535/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1536 * and return needle location in *index. If scsi3addr matches, but not
1537 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1538 * location in *index.
1539 * In the case of a minor device attribute change, such as RAID level, just
1540 * return DEVICE_UPDATED, along with the updated device's location in index.
1541 * If needle not found, return DEVICE_NOT_FOUND.
1542 */
1543static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1544 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1545 int *index)
1546{
1547 int i;
1548#define DEVICE_NOT_FOUND 0
1549#define DEVICE_CHANGED 1
1550#define DEVICE_SAME 2
1551#define DEVICE_UPDATED 3
1552 if (needle == NULL)
1553 return DEVICE_NOT_FOUND;
1554
1555 for (i = 0; i < haystack_size; i++) {
1556 if (haystack[i] == NULL) /* previously removed. */
1557 continue;
1558 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1559 *index = i;
1560 if (device_is_the_same(needle, haystack[i])) {
1561 if (device_updated(needle, haystack[i]))
1562 return DEVICE_UPDATED;
1563 return DEVICE_SAME;
1564 } else {
1565 /* Keep offline devices offline */
1566 if (needle->volume_offline)
1567 return DEVICE_NOT_FOUND;
1568 return DEVICE_CHANGED;
1569 }
1570 }
1571 }
1572 *index = -1;
1573 return DEVICE_NOT_FOUND;
1574}
1575
1576static void hpsa_monitor_offline_device(struct ctlr_info *h,
1577 unsigned char scsi3addr[])
1578{
1579 struct offline_device_entry *device;
1580 unsigned long flags;
1581
1582 /* Check to see if device is already on the list */
1583 spin_lock_irqsave(&h->offline_device_lock, flags);
1584 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1585 if (memcmp(device->scsi3addr, scsi3addr,
1586 sizeof(device->scsi3addr)) == 0) {
1587 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1588 return;
1589 }
1590 }
1591 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1592
1593 /* Device is not on the list, add it. */
1594 device = kmalloc(sizeof(*device), GFP_KERNEL);
1595 if (!device)
1596 return;
1597
1598 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1599 spin_lock_irqsave(&h->offline_device_lock, flags);
1600 list_add_tail(&device->offline_list, &h->offline_device_list);
1601 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1602}
1603
1604/* Print a message explaining various offline volume states */
1605static void hpsa_show_volume_status(struct ctlr_info *h,
1606 struct hpsa_scsi_dev_t *sd)
1607{
1608 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1609 dev_info(&h->pdev->dev,
1610 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1611 h->scsi_host->host_no,
1612 sd->bus, sd->target, sd->lun);
1613 switch (sd->volume_offline) {
1614 case HPSA_LV_OK:
1615 break;
1616 case HPSA_LV_UNDERGOING_ERASE:
1617 dev_info(&h->pdev->dev,
1618 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1619 h->scsi_host->host_no,
1620 sd->bus, sd->target, sd->lun);
1621 break;
1622 case HPSA_LV_NOT_AVAILABLE:
1623 dev_info(&h->pdev->dev,
1624 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1625 h->scsi_host->host_no,
1626 sd->bus, sd->target, sd->lun);
1627 break;
1628 case HPSA_LV_UNDERGOING_RPI:
1629 dev_info(&h->pdev->dev,
1630 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1631 h->scsi_host->host_no,
1632 sd->bus, sd->target, sd->lun);
1633 break;
1634 case HPSA_LV_PENDING_RPI:
1635 dev_info(&h->pdev->dev,
1636 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1637 h->scsi_host->host_no,
1638 sd->bus, sd->target, sd->lun);
1639 break;
1640 case HPSA_LV_ENCRYPTED_NO_KEY:
1641 dev_info(&h->pdev->dev,
1642 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1643 h->scsi_host->host_no,
1644 sd->bus, sd->target, sd->lun);
1645 break;
1646 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1647 dev_info(&h->pdev->dev,
1648 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1649 h->scsi_host->host_no,
1650 sd->bus, sd->target, sd->lun);
1651 break;
1652 case HPSA_LV_UNDERGOING_ENCRYPTION:
1653 dev_info(&h->pdev->dev,
1654 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1655 h->scsi_host->host_no,
1656 sd->bus, sd->target, sd->lun);
1657 break;
1658 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1659 dev_info(&h->pdev->dev,
1660 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1661 h->scsi_host->host_no,
1662 sd->bus, sd->target, sd->lun);
1663 break;
1664 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1665 dev_info(&h->pdev->dev,
1666 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1667 h->scsi_host->host_no,
1668 sd->bus, sd->target, sd->lun);
1669 break;
1670 case HPSA_LV_PENDING_ENCRYPTION:
1671 dev_info(&h->pdev->dev,
1672 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1673 h->scsi_host->host_no,
1674 sd->bus, sd->target, sd->lun);
1675 break;
1676 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1677 dev_info(&h->pdev->dev,
1678 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1679 h->scsi_host->host_no,
1680 sd->bus, sd->target, sd->lun);
1681 break;
1682 }
1683}
1684
1685/*
 * Figure out the list of physical drive pointers for a logical drive with
1687 * raid offload configured.
1688 */
1689static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1690 struct hpsa_scsi_dev_t *dev[], int ndevices,
1691 struct hpsa_scsi_dev_t *logical_drive)
1692{
1693 struct raid_map_data *map = &logical_drive->raid_map;
1694 struct raid_map_disk_data *dd = &map->data[0];
1695 int i, j;
1696 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1697 le16_to_cpu(map->metadata_disks_per_row);
1698 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1699 le16_to_cpu(map->layout_map_count) *
1700 total_disks_per_row;
1701 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1702 total_disks_per_row;
1703 int qdepth;
1704
1705 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1706 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1707
1708 logical_drive->nphysical_disks = nraid_map_entries;
1709
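	/*
	 * Accumulate a queue depth for the logical drive by summing the
	 * queue depths of its member physical disks, capped at the
	 * controller's command pool size (h->nr_cmds).
	 */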
1710 qdepth = 0;
1711 for (i = 0; i < nraid_map_entries; i++) {
1712 logical_drive->phys_disk[i] = NULL;
1713 if (!logical_drive->offload_config)
1714 continue;
1715 for (j = 0; j < ndevices; j++) {
1716 if (dev[j] == NULL)
1717 continue;
1718 if (dev[j]->devtype != TYPE_DISK &&
1719 dev[j]->devtype != TYPE_ZBC)
1720 continue;
1721 if (is_logical_device(dev[j]))
1722 continue;
1723 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1724 continue;
1725
1726 logical_drive->phys_disk[i] = dev[j];
1727 if (i < nphys_disk)
1728 qdepth = min(h->nr_cmds, qdepth +
1729 logical_drive->phys_disk[i]->queue_depth);
1730 break;
1731 }
1732
1733 /*
1734 * This can happen if a physical drive is removed and
1735 * the logical drive is degraded. In that case, the RAID
1736 * map data will refer to a physical disk which isn't actually
1737 * present. And in that case offload_enabled should already
1738 * be 0, but we'll turn it off here just in case
1739 */
1740 if (!logical_drive->phys_disk[i]) {
1741 dev_warn(&h->pdev->dev,
1742 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1743 __func__,
1744 h->scsi_host->host_no, logical_drive->bus,
1745 logical_drive->target, logical_drive->lun);
1746 logical_drive->offload_enabled = 0;
1747 logical_drive->offload_to_be_enabled = 0;
1748 logical_drive->queue_depth = 8;
1749 }
1750 }
1751 if (nraid_map_entries)
1752 /*
1753 * This is correct for reads, too high for full stripe writes,
1754 * way too high for partial stripe writes
1755 */
1756 logical_drive->queue_depth = qdepth;
1757 else {
1758 if (logical_drive->external)
1759 logical_drive->queue_depth = EXTERNAL_QD;
1760 else
1761 logical_drive->queue_depth = h->nr_cmds;
1762 }
1763}
1764
1765static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1766 struct hpsa_scsi_dev_t *dev[], int ndevices)
1767{
1768 int i;
1769
1770 for (i = 0; i < ndevices; i++) {
1771 if (dev[i] == NULL)
1772 continue;
1773 if (dev[i]->devtype != TYPE_DISK &&
1774 dev[i]->devtype != TYPE_ZBC)
1775 continue;
1776 if (!is_logical_device(dev[i]))
1777 continue;
1778
1779 /*
1780 * If offload is currently enabled, the RAID map and
1781 * phys_disk[] assignment *better* not be changing
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on an ioaccel volume that is processing I/O requests.
1784 *
1785 * If an ioaccel volume status changed, initially because it was
1786 * re-configured and thus underwent a transformation, or
1787 * a drive failed, we would have received a state change
1788 * request and ioaccel should have been turned off. When the
1789 * transformation completes, we get another state change
1790 * request to turn ioaccel back on. In this case, we need
1791 * to update the ioaccel information.
1792 *
1793 * Thus: If it is not currently enabled, but will be after
1794 * the scan completes, make sure the ioaccel pointers
1795 * are up to date.
1796 */
1797
1798 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1799 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1800 }
1801}
1802
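/*
 * Expose a newly discovered device to the SCSI midlayer: logical
 * (RAID) volumes are added with scsi_add_device(), physical (HBA)
 * devices through the SAS transport layer.
 */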
1803static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1804{
1805 int rc = 0;
1806
1807 if (!h->scsi_host)
1808 return 1;
1809
1810 if (is_logical_device(device)) /* RAID */
1811 rc = scsi_add_device(h->scsi_host, device->bus,
1812 device->target, device->lun);
1813 else /* HBA */
1814 rc = hpsa_add_sas_device(h->sas_host, device);
1815
1816 return rc;
1817}
1818
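/*
 * Count the commands in the command pool that are still outstanding
 * and addressed to the given device.  Used to drain I/O before the
 * device is removed.
 */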
1819static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1820 struct hpsa_scsi_dev_t *dev)
1821{
1822 int i;
1823 int count = 0;
1824
1825 for (i = 0; i < h->nr_cmds; i++) {
1826 struct CommandList *c = h->cmd_pool + i;
1827 int refcount = atomic_inc_return(&c->refcount);
1828
1829 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1830 dev->scsi3addr)) {
1831 unsigned long flags;
1832
1833 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
1834 if (!hpsa_is_cmd_idle(c))
1835 ++count;
1836 spin_unlock_irqrestore(&h->lock, flags);
1837 }
1838
1839 cmd_free(h, c);
1840 }
1841
1842 return count;
1843}
1844
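/*
 * Poll for up to about 20 seconds for the device's outstanding
 * commands to drain, warning if some are still in flight when we
 * give up.
 */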
1845static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1846 struct hpsa_scsi_dev_t *device)
1847{
1848 int cmds = 0;
1849 int waits = 0;
1850
1851 while (1) {
1852 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1853 if (cmds == 0)
1854 break;
1855 if (++waits > 20)
1856 break;
1857 msleep(1000);
1858 }
1859
1860 if (waits > 20)
1861 dev_warn(&h->pdev->dev,
1862 "%s: removing device with %d outstanding commands!\n",
1863 __func__, cmds);
1864}
1865
1866static void hpsa_remove_device(struct ctlr_info *h,
1867 struct hpsa_scsi_dev_t *device)
1868{
1869 struct scsi_device *sdev = NULL;
1870
1871 if (!h->scsi_host)
1872 return;
1873
1874 /*
1875 * Allow for commands to drain
1876 */
1877 device->removed = 1;
1878 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1879
1880 if (is_logical_device(device)) { /* RAID */
1881 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1882 device->target, device->lun);
1883 if (sdev) {
1884 scsi_remove_device(sdev);
1885 scsi_device_put(sdev);
1886 } else {
1887 /*
1888 * We don't expect to get here. Future commands
1889 * to this device will get a selection timeout as
1890 * if the device were gone.
1891 */
1892 hpsa_show_dev_msg(KERN_WARNING, h, device,
1893 "didn't find device for removal.");
1894 }
1895 } else { /* HBA */
1896
1897 hpsa_remove_sas_device(device);
1898 }
1899}
1900
1901static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1902 struct hpsa_scsi_dev_t *sd[], int nsds)
1903{
1904 /* sd contains scsi3 addresses and devtypes, and inquiry
1905 * data. This function takes what's in sd to be the current
1906 * reality and updates h->dev[] to reflect that reality.
1907 */
1908 int i, entry, device_change, changes = 0;
1909 struct hpsa_scsi_dev_t *csd;
1910 unsigned long flags;
1911 struct hpsa_scsi_dev_t **added, **removed;
1912 int nadded, nremoved;
1913
1914 /*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
1917 */
1918 spin_lock_irqsave(&h->reset_lock, flags);
1919 if (h->reset_in_progress) {
1920 h->drv_req_rescan = 1;
1921 spin_unlock_irqrestore(&h->reset_lock, flags);
1922 return;
1923 }
1924 spin_unlock_irqrestore(&h->reset_lock, flags);
1925
1926 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1927 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1928
1929 if (!added || !removed) {
1930 dev_warn(&h->pdev->dev, "out of memory in "
1931 "adjust_hpsa_scsi_table\n");
1932 goto free_and_out;
1933 }
1934
1935 spin_lock_irqsave(&h->devlock, flags);
1936
1937 /* find any devices in h->dev[] that are not in
1938 * sd[] and remove them from h->dev[], and for any
1939 * devices which have changed, remove the old device
1940 * info and add the new device info.
1941 * If minor device attributes change, just update
1942 * the existing device structure.
1943 */
1944 i = 0;
1945 nremoved = 0;
1946 nadded = 0;
1947 while (i < h->ndevices) {
1948 csd = h->dev[i];
1949 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1950 if (device_change == DEVICE_NOT_FOUND) {
1951 changes++;
1952 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1953 continue; /* remove ^^^, hence i not incremented */
1954 } else if (device_change == DEVICE_CHANGED) {
1955 changes++;
1956 hpsa_scsi_replace_entry(h, i, sd[entry],
1957 added, &nadded, removed, &nremoved);
1958 /* Set it to NULL to prevent it from being freed
1959 * at the bottom of hpsa_update_scsi_devices()
1960 */
1961 sd[entry] = NULL;
1962 } else if (device_change == DEVICE_UPDATED) {
1963 hpsa_scsi_update_entry(h, i, sd[entry]);
1964 }
1965 i++;
1966 }
1967
1968 /* Now, make sure every device listed in sd[] is also
1969 * listed in h->dev[], adding them if they aren't found
1970 */
1971
1972 for (i = 0; i < nsds; i++) {
1973 if (!sd[i]) /* if already added above. */
1974 continue;
1975
1976 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1977 * as the SCSI mid-layer does not handle such devices well.
1978 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1979 * at 160Hz, and prevents the system from coming up.
1980 */
1981 if (sd[i]->volume_offline) {
1982 hpsa_show_volume_status(h, sd[i]);
1983 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1984 continue;
1985 }
1986
1987 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1988 h->ndevices, &entry);
1989 if (device_change == DEVICE_NOT_FOUND) {
1990 changes++;
1991 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1992 break;
1993 sd[i] = NULL; /* prevent from being freed later. */
1994 } else if (device_change == DEVICE_CHANGED) {
1995 /* should never happen... */
1996 changes++;
1997 dev_warn(&h->pdev->dev,
1998 "device unexpectedly changed.\n");
1999 /* but if it does happen, we just ignore that device */
2000 }
2001 }
2002 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2003
2004 /*
2005 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2006 * any logical drives that need it enabled.
2007 *
2008 * The raid map should be current by now.
2009 *
2010 * We are updating the device list used for I/O requests.
2011 */
2012 for (i = 0; i < h->ndevices; i++) {
2013 if (h->dev[i] == NULL)
2014 continue;
2015 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2016 }
2017
2018 spin_unlock_irqrestore(&h->devlock, flags);
2019
2020 /* Monitor devices which are in one of several NOT READY states to be
2021 * brought online later. This must be done without holding h->devlock,
2022 * so don't touch h->dev[]
2023 */
2024 for (i = 0; i < nsds; i++) {
2025 if (!sd[i]) /* if already added above. */
2026 continue;
2027 if (sd[i]->volume_offline)
2028 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2029 }
2030
	/* Don't notify the SCSI midlayer of any changes the first time
	 * through, or if there are no changes; scsi_scan_host() will do
	 * it later, the first time through.
	 */
2035 if (!changes)
2036 goto free_and_out;
2037
2038 /* Notify scsi mid layer of any removed devices */
2039 for (i = 0; i < nremoved; i++) {
2040 if (removed[i] == NULL)
2041 continue;
2042 if (removed[i]->expose_device)
2043 hpsa_remove_device(h, removed[i]);
2044 kfree(removed[i]);
2045 removed[i] = NULL;
2046 }
2047
2048 /* Notify scsi mid layer of any added devices */
2049 for (i = 0; i < nadded; i++) {
2050 int rc = 0;
2051
2052 if (added[i] == NULL)
2053 continue;
2054 if (!(added[i]->expose_device))
2055 continue;
2056 rc = hpsa_add_device(h, added[i]);
2057 if (!rc)
2058 continue;
2059 dev_warn(&h->pdev->dev,
2060 "addition failed %d, device not added.", rc);
2061 /* now we have to remove it from h->dev,
2062 * since it didn't get added to scsi mid layer
2063 */
2064 fixup_botched_add(h, added[i]);
2065 h->drv_req_rescan = 1;
2066 }
2067
2068free_and_out:
2069 kfree(added);
2070 kfree(removed);
2071}
2072
2073/*
 * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
 * Assumes h->devlock is held.
2076 */
2077static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2078 int bus, int target, int lun)
2079{
2080 int i;
2081 struct hpsa_scsi_dev_t *sd;
2082
2083 for (i = 0; i < h->ndevices; i++) {
2084 sd = h->dev[i];
2085 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2086 return sd;
2087 }
2088 return NULL;
2089}
2090
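/*
 * Called by the SCSI midlayer when a new scsi_device is created.
 * Find the matching hpsa_scsi_dev_t (via the SAS rphy for physical
 * devices, or by bus/target/lun for logical ones) and stash it in
 * sdev->hostdata for use on the I/O path.
 */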
2091static int hpsa_slave_alloc(struct scsi_device *sdev)
2092{
2093 struct hpsa_scsi_dev_t *sd = NULL;
2094 unsigned long flags;
2095 struct ctlr_info *h;
2096
2097 h = sdev_to_hba(sdev);
2098 spin_lock_irqsave(&h->devlock, flags);
2099 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2100 struct scsi_target *starget;
2101 struct sas_rphy *rphy;
2102
2103 starget = scsi_target(sdev);
2104 rphy = target_to_rphy(starget);
2105 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2106 if (sd) {
2107 sd->target = sdev_id(sdev);
2108 sd->lun = sdev->lun;
2109 }
2110 }
2111 if (!sd)
2112 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2113 sdev_id(sdev), sdev->lun);
2114
2115 if (sd && sd->expose_device) {
2116 atomic_set(&sd->ioaccel_cmds_out, 0);
2117 sdev->hostdata = sd;
2118 } else
2119 sdev->hostdata = NULL;
2120 spin_unlock_irqrestore(&h->devlock, flags);
2121 return 0;
2122}
2123
2124/* configure scsi device based on internal per-device structure */
2125static int hpsa_slave_configure(struct scsi_device *sdev)
2126{
2127 struct hpsa_scsi_dev_t *sd;
2128 int queue_depth;
2129
2130 sd = sdev->hostdata;
2131 sdev->no_uld_attach = !sd || !sd->expose_device;
2132
2133 if (sd) {
2134 if (sd->external)
2135 queue_depth = EXTERNAL_QD;
2136 else
2137 queue_depth = sd->queue_depth != 0 ?
2138 sd->queue_depth : sdev->host->can_queue;
2139 } else
2140 queue_depth = sdev->host->can_queue;
2141
2142 scsi_change_queue_depth(sdev, queue_depth);
2143
2144 return 0;
2145}
2146
2147static void hpsa_slave_destroy(struct scsi_device *sdev)
2148{
2149 /* nothing to do. */
2150}
2151
2152static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2153{
2154 int i;
2155
2156 if (!h->ioaccel2_cmd_sg_list)
2157 return;
2158 for (i = 0; i < h->nr_cmds; i++) {
2159 kfree(h->ioaccel2_cmd_sg_list[i]);
2160 h->ioaccel2_cmd_sg_list[i] = NULL;
2161 }
2162 kfree(h->ioaccel2_cmd_sg_list);
2163 h->ioaccel2_cmd_sg_list = NULL;
2164}
2165
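/*
 * Allocate one ioaccel2 SG chain block per command.  These hold the
 * scatter-gather entries that do not fit in the io_accel2_cmd itself.
 */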
2166static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2167{
2168 int i;
2169
2170 if (h->chainsize <= 0)
2171 return 0;
2172
2173 h->ioaccel2_cmd_sg_list =
2174 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2175 GFP_KERNEL);
2176 if (!h->ioaccel2_cmd_sg_list)
2177 return -ENOMEM;
2178 for (i = 0; i < h->nr_cmds; i++) {
2179 h->ioaccel2_cmd_sg_list[i] =
2180 kmalloc_array(h->maxsgentries,
2181 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2182 GFP_KERNEL);
2183 if (!h->ioaccel2_cmd_sg_list[i])
2184 goto clean;
2185 }
2186 return 0;
2187
2188clean:
2189 hpsa_free_ioaccel2_sg_chain_blocks(h);
2190 return -ENOMEM;
2191}
2192
2193static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2194{
2195 int i;
2196
2197 if (!h->cmd_sg_list)
2198 return;
2199 for (i = 0; i < h->nr_cmds; i++) {
2200 kfree(h->cmd_sg_list[i]);
2201 h->cmd_sg_list[i] = NULL;
2202 }
2203 kfree(h->cmd_sg_list);
2204 h->cmd_sg_list = NULL;
2205}
2206
2207static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2208{
2209 int i;
2210
2211 if (h->chainsize <= 0)
2212 return 0;
2213
2214 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2215 GFP_KERNEL);
2216 if (!h->cmd_sg_list)
2217 return -ENOMEM;
2218
2219 for (i = 0; i < h->nr_cmds; i++) {
2220 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2221 sizeof(*h->cmd_sg_list[i]),
2222 GFP_KERNEL);
2223 if (!h->cmd_sg_list[i])
2224 goto clean;
2225
2226 }
2227 return 0;
2228
2229clean:
2230 hpsa_free_sg_chain_blocks(h);
2231 return -ENOMEM;
2232}
2233
2234static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2235 struct io_accel2_cmd *cp, struct CommandList *c)
2236{
2237 struct ioaccel2_sg_element *chain_block;
2238 u64 temp64;
2239 u32 chain_size;
2240
2241 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2242 chain_size = le32_to_cpu(cp->sg[0].length);
2243 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2244 DMA_TO_DEVICE);
2245 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2246 /* prevent subsequent unmapping */
2247 cp->sg->address = 0;
2248 return -1;
2249 }
2250 cp->sg->address = cpu_to_le64(temp64);
2251 return 0;
2252}
2253
2254static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2255 struct io_accel2_cmd *cp)
2256{
2257 struct ioaccel2_sg_element *chain_sg;
2258 u64 temp64;
2259 u32 chain_size;
2260
2261 chain_sg = cp->sg;
2262 temp64 = le64_to_cpu(chain_sg->address);
2263 chain_size = le32_to_cpu(cp->sg[0].length);
2264 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2265}
2266
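/*
 * When a request needs more scatter-gather entries than fit in the
 * CommandList, the last embedded SG slot becomes a pointer to a
 * pre-allocated chain block.  DMA-map that block and hook it up here.
 */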
2267static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2268 struct CommandList *c)
2269{
2270 struct SGDescriptor *chain_sg, *chain_block;
2271 u64 temp64;
2272 u32 chain_len;
2273
2274 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2275 chain_block = h->cmd_sg_list[c->cmdindex];
2276 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2277 chain_len = sizeof(*chain_sg) *
2278 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2279 chain_sg->Len = cpu_to_le32(chain_len);
2280 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2281 DMA_TO_DEVICE);
2282 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2283 /* prevent subsequent unmapping */
2284 chain_sg->Addr = cpu_to_le64(0);
2285 return -1;
2286 }
2287 chain_sg->Addr = cpu_to_le64(temp64);
2288 return 0;
2289}
2290
2291static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2292 struct CommandList *c)
2293{
2294 struct SGDescriptor *chain_sg;
2295
2296 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2297 return;
2298
2299 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2300 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2301 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2302}
2303
2304
2305/* Decode the various types of errors on ioaccel2 path.
2306 * Return 1 for any error that should generate a RAID path retry.
2307 * Return 0 for errors that don't require a RAID path retry.
2308 */
2309static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2310 struct CommandList *c,
2311 struct scsi_cmnd *cmd,
2312 struct io_accel2_cmd *c2,
2313 struct hpsa_scsi_dev_t *dev)
2314{
2315 int data_len;
2316 int retry = 0;
2317 u32 ioaccel2_resid = 0;
2318
2319 switch (c2->error_data.serv_response) {
2320 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2321 switch (c2->error_data.status) {
2322 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2323 break;
2324 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2325 cmd->result |= SAM_STAT_CHECK_CONDITION;
2326 if (c2->error_data.data_present !=
2327 IOACCEL2_SENSE_DATA_PRESENT) {
2328 memset(cmd->sense_buffer, 0,
2329 SCSI_SENSE_BUFFERSIZE);
2330 break;
2331 }
2332 /* copy the sense data */
2333 data_len = c2->error_data.sense_data_len;
2334 if (data_len > SCSI_SENSE_BUFFERSIZE)
2335 data_len = SCSI_SENSE_BUFFERSIZE;
2336 if (data_len > sizeof(c2->error_data.sense_data_buff))
2337 data_len =
2338 sizeof(c2->error_data.sense_data_buff);
2339 memcpy(cmd->sense_buffer,
2340 c2->error_data.sense_data_buff, data_len);
2341 retry = 1;
2342 break;
2343 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2344 retry = 1;
2345 break;
2346 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2347 retry = 1;
2348 break;
2349 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2350 retry = 1;
2351 break;
2352 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2353 retry = 1;
2354 break;
2355 default:
2356 retry = 1;
2357 break;
2358 }
2359 break;
2360 case IOACCEL2_SERV_RESPONSE_FAILURE:
2361 switch (c2->error_data.status) {
2362 case IOACCEL2_STATUS_SR_IO_ERROR:
2363 case IOACCEL2_STATUS_SR_IO_ABORTED:
2364 case IOACCEL2_STATUS_SR_OVERRUN:
2365 retry = 1;
2366 break;
2367 case IOACCEL2_STATUS_SR_UNDERRUN:
2368 cmd->result = (DID_OK << 16); /* host byte */
2369 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2370 ioaccel2_resid = get_unaligned_le32(
2371 &c2->error_data.resid_cnt[0]);
2372 scsi_set_resid(cmd, ioaccel2_resid);
2373 break;
2374 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2375 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2376 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2377 /*
2378 * Did an HBA disk disappear? We will eventually
2379 * get a state change event from the controller but
2380 * in the meantime, we need to tell the OS that the
2381 * HBA disk is no longer there and stop I/O
2382 * from going down. This allows the potential re-insert
2383 * of the disk to get the same device node.
2384 */
2385 if (dev->physical_device && dev->expose_device) {
2386 cmd->result = DID_NO_CONNECT << 16;
2387 dev->removed = 1;
2388 h->drv_req_rescan = 1;
2389 dev_warn(&h->pdev->dev,
2390 "%s: device is gone!\n", __func__);
2391 } else
2392 /*
2393 * Retry by sending down the RAID path.
2394 * We will get an event from ctlr to
2395 * trigger rescan regardless.
2396 */
2397 retry = 1;
2398 break;
2399 default:
2400 retry = 1;
2401 }
2402 break;
2403 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2404 break;
2405 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2406 break;
2407 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2408 retry = 1;
2409 break;
2410 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2411 break;
2412 default:
2413 retry = 1;
2414 break;
2415 }
2416
2417 return retry; /* retry on raid path? */
2418}
2419
2420static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2421 struct CommandList *c)
2422{
2423 bool do_wake = false;
2424
2425 /*
2426 * Reset c->scsi_cmd here so that the reset handler will know
2427 * this command has completed. Then, check to see if the handler is
2428 * waiting for this command, and, if so, wake it.
2429 */
2430 c->scsi_cmd = SCSI_CMD_IDLE;
2431 mb(); /* Declare command idle before checking for pending events. */
2432 if (c->reset_pending) {
2433 unsigned long flags;
2434 struct hpsa_scsi_dev_t *dev;
2435
2436 /*
		 * There appears to be a reset pending; take h->lock and
2438 * reconfirm. If so, then decrement the count of outstanding
2439 * commands and wake the reset command if this is the last one.
2440 */
2441 spin_lock_irqsave(&h->lock, flags);
2442 dev = c->reset_pending; /* Re-fetch under the lock. */
2443 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2444 do_wake = true;
2445 c->reset_pending = NULL;
2446 spin_unlock_irqrestore(&h->lock, flags);
2447 }
2448
2449 if (do_wake)
2450 wake_up_all(&h->event_sync_wait_queue);
2451}
2452
2453static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2454 struct CommandList *c)
2455{
2456 hpsa_cmd_resolve_events(h, c);
2457 cmd_tagged_free(h, c);
2458}
2459
2460static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2461 struct CommandList *c, struct scsi_cmnd *cmd)
2462{
2463 hpsa_cmd_resolve_and_free(h, c);
2464 if (cmd && cmd->scsi_done)
2465 cmd->scsi_done(cmd);
2466}
2467
2468static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2469{
2470 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2471 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2472}
2473
2474static void process_ioaccel2_completion(struct ctlr_info *h,
2475 struct CommandList *c, struct scsi_cmnd *cmd,
2476 struct hpsa_scsi_dev_t *dev)
2477{
2478 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2479
2480 /* check for good status */
2481 if (likely(c2->error_data.serv_response == 0 &&
2482 c2->error_data.status == 0))
2483 return hpsa_cmd_free_and_done(h, c, cmd);
2484
2485 /*
2486 * Any RAID offload error results in retry which will use
2487 * the normal I/O path so the controller can handle whatever is
2488 * wrong.
2489 */
2490 if (is_logical_device(dev) &&
2491 c2->error_data.serv_response ==
2492 IOACCEL2_SERV_RESPONSE_FAILURE) {
2493 if (c2->error_data.status ==
2494 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2495 dev->offload_enabled = 0;
2496 dev->offload_to_be_enabled = 0;
2497 }
2498
2499 return hpsa_retry_cmd(h, c);
2500 }
2501
2502 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2503 return hpsa_retry_cmd(h, c);
2504
2505 return hpsa_cmd_free_and_done(h, c, cmd);
2506}
2507
2508/* Returns 0 on success, < 0 otherwise. */
2509static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2510 struct CommandList *cp)
2511{
2512 u8 tmf_status = cp->err_info->ScsiStatus;
2513
2514 switch (tmf_status) {
2515 case CISS_TMF_COMPLETE:
2516 /*
2517 * CISS_TMF_COMPLETE never happens, instead,
2518 * ei->CommandStatus == 0 for this case.
2519 */
2520 case CISS_TMF_SUCCESS:
2521 return 0;
2522 case CISS_TMF_INVALID_FRAME:
2523 case CISS_TMF_NOT_SUPPORTED:
2524 case CISS_TMF_FAILED:
2525 case CISS_TMF_WRONG_LUN:
2526 case CISS_TMF_OVERLAPPED_TAG:
2527 break;
2528 default:
2529 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2530 tmf_status);
2531 break;
2532 }
2533 return -tmf_status;
2534}
2535
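/*
 * Command completion processing: translate the controller's error
 * information into SCSI midlayer result codes, copy any sense data,
 * and either complete the command back to the midlayer or resubmit it
 * down the RAID path when an ioaccel error calls for a retry.
 */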
2536static void complete_scsi_command(struct CommandList *cp)
2537{
2538 struct scsi_cmnd *cmd;
2539 struct ctlr_info *h;
2540 struct ErrorInfo *ei;
2541 struct hpsa_scsi_dev_t *dev;
2542 struct io_accel2_cmd *c2;
2543
2544 u8 sense_key;
2545 u8 asc; /* additional sense code */
2546 u8 ascq; /* additional sense code qualifier */
2547 unsigned long sense_data_size;
2548
2549 ei = cp->err_info;
2550 cmd = cp->scsi_cmd;
2551 h = cp->h;
2552
2553 if (!cmd->device) {
2554 cmd->result = DID_NO_CONNECT << 16;
2555 return hpsa_cmd_free_and_done(h, cp, cmd);
2556 }
2557
2558 dev = cmd->device->hostdata;
2559 if (!dev) {
2560 cmd->result = DID_NO_CONNECT << 16;
2561 return hpsa_cmd_free_and_done(h, cp, cmd);
2562 }
2563 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2564
2565 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2566 if ((cp->cmd_type == CMD_SCSI) &&
2567 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2568 hpsa_unmap_sg_chain_block(h, cp);
2569
2570 if ((cp->cmd_type == CMD_IOACCEL2) &&
2571 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2572 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2573
2574 cmd->result = (DID_OK << 16); /* host byte */
2575 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2576
2577 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2578 if (dev->physical_device && dev->expose_device &&
2579 dev->removed) {
2580 cmd->result = DID_NO_CONNECT << 16;
2581 return hpsa_cmd_free_and_done(h, cp, cmd);
2582 }
2583 if (likely(cp->phys_disk != NULL))
2584 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2585 }
2586
2587 /*
2588 * We check for lockup status here as it may be set for
2589 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
2591 */
2592 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2593 /* DID_NO_CONNECT will prevent a retry */
2594 cmd->result = DID_NO_CONNECT << 16;
2595 return hpsa_cmd_free_and_done(h, cp, cmd);
2596 }
2597
2598 if ((unlikely(hpsa_is_pending_event(cp))))
2599 if (cp->reset_pending)
2600 return hpsa_cmd_free_and_done(h, cp, cmd);
2601
2602 if (cp->cmd_type == CMD_IOACCEL2)
2603 return process_ioaccel2_completion(h, cp, cmd, dev);
2604
2605 scsi_set_resid(cmd, ei->ResidualCnt);
2606 if (ei->CommandStatus == 0)
2607 return hpsa_cmd_free_and_done(h, cp, cmd);
2608
2609 /* For I/O accelerator commands, copy over some fields to the normal
2610 * CISS header used below for error handling.
2611 */
2612 if (cp->cmd_type == CMD_IOACCEL1) {
2613 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2614 cp->Header.SGList = scsi_sg_count(cmd);
2615 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2616 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2617 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2618 cp->Header.tag = c->tag;
2619 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2620 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2621
2622 /* Any RAID offload error results in retry which will use
2623 * the normal I/O path so the controller can handle whatever's
2624 * wrong.
2625 */
2626 if (is_logical_device(dev)) {
2627 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2628 dev->offload_enabled = 0;
2629 return hpsa_retry_cmd(h, cp);
2630 }
2631 }
2632
2633 /* an error has occurred */
2634 switch (ei->CommandStatus) {
2635
2636 case CMD_TARGET_STATUS:
2637 cmd->result |= ei->ScsiStatus;
2638 /* copy the sense data */
2639 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2640 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2641 else
2642 sense_data_size = sizeof(ei->SenseInfo);
2643 if (ei->SenseLen < sense_data_size)
2644 sense_data_size = ei->SenseLen;
2645 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2646 if (ei->ScsiStatus)
2647 decode_sense_data(ei->SenseInfo, sense_data_size,
2648 &sense_key, &asc, &ascq);
2649 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2650 if (sense_key == ABORTED_COMMAND) {
2651 cmd->result |= DID_SOFT_ERROR << 16;
2652 break;
2653 }
2654 break;
2655 }
2656 /* Problem was not a check condition
2657 * Pass it up to the upper layers...
2658 */
2659 if (ei->ScsiStatus) {
2660 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2661 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2662 "Returning result: 0x%x\n",
2663 cp, ei->ScsiStatus,
2664 sense_key, asc, ascq,
2665 cmd->result);
2666 } else { /* scsi status is zero??? How??? */
2667 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2668 "Returning no connection.\n", cp),
2669
2670 /* Ordinarily, this case should never happen,
2671 * but there is a bug in some released firmware
2672 * revisions that allows it to happen if, for
2673 * example, a 4100 backplane loses power and
2674 * the tape drive is in it. We assume that
2675 * it's a fatal error of some kind because we
2676 * can't show that it wasn't. We will make it
2677 * look like selection timeout since that is
2678 * the most common reason for this to occur,
2679 * and it's severe enough.
2680 */
2681
2682 cmd->result = DID_NO_CONNECT << 16;
2683 }
2684 break;
2685
2686 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2687 break;
2688 case CMD_DATA_OVERRUN:
2689 dev_warn(&h->pdev->dev,
2690 "CDB %16phN data overrun\n", cp->Request.CDB);
2691 break;
2692 case CMD_INVALID: {
2693 /* print_bytes(cp, sizeof(*cp), 1, 0);
2694 print_cmd(cp); */
2695 /* We get CMD_INVALID if you address a non-existent device
2696 * instead of a selection timeout (no response). You will
2697 * see this if you yank out a drive, then try to access it.
2698 * This is kind of a shame because it means that any other
2699 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2700 * missing target. */
2701 cmd->result = DID_NO_CONNECT << 16;
2702 }
2703 break;
2704 case CMD_PROTOCOL_ERR:
2705 cmd->result = DID_ERROR << 16;
2706 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2707 cp->Request.CDB);
2708 break;
2709 case CMD_HARDWARE_ERR:
2710 cmd->result = DID_ERROR << 16;
2711 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2712 cp->Request.CDB);
2713 break;
2714 case CMD_CONNECTION_LOST:
2715 cmd->result = DID_ERROR << 16;
2716 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2717 cp->Request.CDB);
2718 break;
2719 case CMD_ABORTED:
2720 cmd->result = DID_ABORT << 16;
2721 break;
2722 case CMD_ABORT_FAILED:
2723 cmd->result = DID_ERROR << 16;
2724 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2725 cp->Request.CDB);
2726 break;
2727 case CMD_UNSOLICITED_ABORT:
2728 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2729 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2730 cp->Request.CDB);
2731 break;
2732 case CMD_TIMEOUT:
2733 cmd->result = DID_TIME_OUT << 16;
2734 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2735 cp->Request.CDB);
2736 break;
2737 case CMD_UNABORTABLE:
2738 cmd->result = DID_ERROR << 16;
2739 dev_warn(&h->pdev->dev, "Command unabortable\n");
2740 break;
2741 case CMD_TMF_STATUS:
2742 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2743 cmd->result = DID_ERROR << 16;
2744 break;
2745 case CMD_IOACCEL_DISABLED:
2746 /* This only handles the direct pass-through case since RAID
2747 * offload is handled above. Just attempt a retry.
2748 */
2749 cmd->result = DID_SOFT_ERROR << 16;
2750 dev_warn(&h->pdev->dev,
2751 "cp %p had HP SSD Smart Path error\n", cp);
2752 break;
2753 default:
2754 cmd->result = DID_ERROR << 16;
2755 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2756 cp, ei->CommandStatus);
2757 }
2758
2759 return hpsa_cmd_free_and_done(h, cp, cmd);
2760}
2761
2762static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2763 int sg_used, enum dma_data_direction data_direction)
2764{
2765 int i;
2766
2767 for (i = 0; i < sg_used; i++)
2768 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2769 le32_to_cpu(c->SG[i].Len),
2770 data_direction);
2771}
2772
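/*
 * DMA-map a single contiguous buffer and describe it in SG[0].  Used
 * for driver-initiated commands that transfer at most one buffer.
 */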
2773static int hpsa_map_one(struct pci_dev *pdev,
2774 struct CommandList *cp,
2775 unsigned char *buf,
2776 size_t buflen,
2777 enum dma_data_direction data_direction)
2778{
2779 u64 addr64;
2780
2781 if (buflen == 0 || data_direction == DMA_NONE) {
2782 cp->Header.SGList = 0;
2783 cp->Header.SGTotal = cpu_to_le16(0);
2784 return 0;
2785 }
2786
2787 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2788 if (dma_mapping_error(&pdev->dev, addr64)) {
2789 /* Prevent subsequent unmap of something never mapped */
2790 cp->Header.SGList = 0;
2791 cp->Header.SGTotal = cpu_to_le16(0);
2792 return -1;
2793 }
2794 cp->SG[0].Addr = cpu_to_le64(addr64);
2795 cp->SG[0].Len = cpu_to_le32(buflen);
2796 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2797 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2798 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2799 return 0;
2800}
2801
2802#define NO_TIMEOUT ((unsigned long) -1)
2803#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2804static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2805 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2806{
2807 DECLARE_COMPLETION_ONSTACK(wait);
2808
2809 c->waiting = &wait;
2810 __enqueue_cmd_and_start_io(h, c, reply_queue);
2811 if (timeout_msecs == NO_TIMEOUT) {
2812 /* TODO: get rid of this no-timeout thing */
2813 wait_for_completion_io(&wait);
2814 return IO_OK;
2815 }
2816 if (!wait_for_completion_io_timeout(&wait,
2817 msecs_to_jiffies(timeout_msecs))) {
2818 dev_warn(&h->pdev->dev, "Command timed out.\n");
2819 return -ETIMEDOUT;
2820 }
2821 return IO_OK;
2822}
2823
2824static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2825 int reply_queue, unsigned long timeout_msecs)
2826{
2827 if (unlikely(lockup_detected(h))) {
2828 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2829 return IO_OK;
2830 }
2831 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2832}
2833
2834static u32 lockup_detected(struct ctlr_info *h)
2835{
2836 int cpu;
2837 u32 rc, *lockup_detected;
2838
2839 cpu = get_cpu();
2840 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2841 rc = *lockup_detected;
2842 put_cpu();
2843 return rc;
2844}
2845
2846#define MAX_DRIVER_CMD_RETRIES 25
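/*
 * Issue a driver-internal command, retrying while the target reports
 * unit attention or busy, up to MAX_DRIVER_CMD_RETRIES times, with a
 * backoff delay that starts after the third attempt and doubles up to
 * roughly one second.
 */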
2847static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2848 struct CommandList *c, enum dma_data_direction data_direction,
2849 unsigned long timeout_msecs)
2850{
2851 int backoff_time = 10, retry_count = 0;
2852 int rc;
2853
2854 do {
2855 memset(c->err_info, 0, sizeof(*c->err_info));
2856 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2857 timeout_msecs);
2858 if (rc)
2859 break;
2860 retry_count++;
2861 if (retry_count > 3) {
2862 msleep(backoff_time);
2863 if (backoff_time < 1000)
2864 backoff_time *= 2;
2865 }
2866 } while ((check_for_unit_attention(h, c) ||
2867 check_for_busy(h, c)) &&
2868 retry_count <= MAX_DRIVER_CMD_RETRIES);
2869 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2870 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2871 rc = -EIO;
2872 return rc;
2873}
2874
2875static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2876 struct CommandList *c)
2877{
2878 const u8 *cdb = c->Request.CDB;
2879 const u8 *lun = c->Header.LUN.LunAddrBytes;
2880
2881 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2882 txt, lun, cdb);
2883}
2884
2885static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2886 struct CommandList *cp)
2887{
2888 const struct ErrorInfo *ei = cp->err_info;
2889 struct device *d = &cp->h->pdev->dev;
2890 u8 sense_key, asc, ascq;
2891 int sense_len;
2892
2893 switch (ei->CommandStatus) {
2894 case CMD_TARGET_STATUS:
2895 if (ei->SenseLen > sizeof(ei->SenseInfo))
2896 sense_len = sizeof(ei->SenseInfo);
2897 else
2898 sense_len = ei->SenseLen;
2899 decode_sense_data(ei->SenseInfo, sense_len,
2900 &sense_key, &asc, &ascq);
2901 hpsa_print_cmd(h, "SCSI status", cp);
2902 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2903 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2904 sense_key, asc, ascq);
2905 else
2906 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2907 if (ei->ScsiStatus == 0)
2908 dev_warn(d, "SCSI status is abnormally zero. "
2909 "(probably indicates selection timeout "
2910 "reported incorrectly due to a known "
2911 "firmware bug, circa July, 2001.)\n");
2912 break;
2913 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2914 break;
2915 case CMD_DATA_OVERRUN:
2916 hpsa_print_cmd(h, "overrun condition", cp);
2917 break;
2918 case CMD_INVALID: {
		/* The controller unfortunately reports SCSI pass-throughs
		 * to non-existent targets as invalid commands.
2921 */
2922 hpsa_print_cmd(h, "invalid command", cp);
2923 dev_warn(d, "probably means device no longer present\n");
2924 }
2925 break;
2926 case CMD_PROTOCOL_ERR:
2927 hpsa_print_cmd(h, "protocol error", cp);
2928 break;
2929 case CMD_HARDWARE_ERR:
2930 hpsa_print_cmd(h, "hardware error", cp);
2931 break;
2932 case CMD_CONNECTION_LOST:
2933 hpsa_print_cmd(h, "connection lost", cp);
2934 break;
2935 case CMD_ABORTED:
2936 hpsa_print_cmd(h, "aborted", cp);
2937 break;
2938 case CMD_ABORT_FAILED:
2939 hpsa_print_cmd(h, "abort failed", cp);
2940 break;
2941 case CMD_UNSOLICITED_ABORT:
2942 hpsa_print_cmd(h, "unsolicited abort", cp);
2943 break;
2944 case CMD_TIMEOUT:
2945 hpsa_print_cmd(h, "timed out", cp);
2946 break;
2947 case CMD_UNABORTABLE:
2948 hpsa_print_cmd(h, "unabortable", cp);
2949 break;
2950 case CMD_CTLR_LOCKUP:
2951 hpsa_print_cmd(h, "controller lockup detected", cp);
2952 break;
2953 default:
2954 hpsa_print_cmd(h, "unknown status", cp);
2955 dev_warn(d, "Unknown command status %x\n",
2956 ei->CommandStatus);
2957 }
2958}
2959
2960static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2961 u8 page, u8 *buf, size_t bufsize)
2962{
2963 int rc = IO_OK;
2964 struct CommandList *c;
2965 struct ErrorInfo *ei;
2966
2967 c = cmd_alloc(h);
2968 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2969 page, scsi3addr, TYPE_CMD)) {
2970 rc = -1;
2971 goto out;
2972 }
2973 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
2974 NO_TIMEOUT);
2975 if (rc)
2976 goto out;
2977 ei = c->err_info;
2978 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2979 hpsa_scsi_interpret_error(h, c);
2980 rc = -1;
2981 }
2982out:
2983 cmd_free(h, c);
2984 return rc;
2985}
2986
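/*
 * Issue RECEIVE DIAGNOSTIC to an enclosure device and extract its
 * logical identifier (SAS address) from bytes 12-19 of the returned
 * page.
 */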
2987static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
2988 u8 *scsi3addr)
2989{
2990 u8 *buf;
2991 u64 sa = 0;
2992 int rc = 0;
2993
2994 buf = kzalloc(1024, GFP_KERNEL);
2995 if (!buf)
2996 return 0;
2997
2998 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
2999 buf, 1024);
3000
3001 if (rc)
3002 goto out;
3003
3004 sa = get_unaligned_be64(buf+12);
3005
3006out:
3007 kfree(buf);
3008 return sa;
3009}
3010
3011static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3012 u16 page, unsigned char *buf,
3013 unsigned char bufsize)
3014{
3015 int rc = IO_OK;
3016 struct CommandList *c;
3017 struct ErrorInfo *ei;
3018
3019 c = cmd_alloc(h);
3020
3021 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3022 page, scsi3addr, TYPE_CMD)) {
3023 rc = -1;
3024 goto out;
3025 }
3026 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3027 NO_TIMEOUT);
3028 if (rc)
3029 goto out;
3030 ei = c->err_info;
3031 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3032 hpsa_scsi_interpret_error(h, c);
3033 rc = -1;
3034 }
3035out:
3036 cmd_free(h, c);
3037 return rc;
3038}
3039
3040static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
3041 u8 reset_type, int reply_queue)
3042{
3043 int rc = IO_OK;
3044 struct CommandList *c;
3045 struct ErrorInfo *ei;
3046
3047 c = cmd_alloc(h);
3048
3049
3050 /* fill_cmd can't fail here, no data buffer to map. */
3051 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
3052 scsi3addr, TYPE_MSG);
3053 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3054 if (rc) {
3055 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3056 goto out;
3057 }
3058 /* no unmap needed here because no data xfer. */
3059
3060 ei = c->err_info;
3061 if (ei->CommandStatus != 0) {
3062 hpsa_scsi_interpret_error(h, c);
3063 rc = -1;
3064 }
3065out:
3066 cmd_free(h, c);
3067 return rc;
3068}
3069
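/*
 * Decide whether an in-flight command is addressed to the given
 * device: compare the LUN address for CISS commands, and the
 * physical-disk / IT nexus handles for ioaccel and TMF commands.
 */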
3070static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3071 struct hpsa_scsi_dev_t *dev,
3072 unsigned char *scsi3addr)
3073{
3074 int i;
3075 bool match = false;
3076 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3077 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3078
3079 if (hpsa_is_cmd_idle(c))
3080 return false;
3081
3082 switch (c->cmd_type) {
3083 case CMD_SCSI:
3084 case CMD_IOCTL_PEND:
3085 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3086 sizeof(c->Header.LUN.LunAddrBytes));
3087 break;
3088
3089 case CMD_IOACCEL1:
3090 case CMD_IOACCEL2:
3091 if (c->phys_disk == dev) {
3092 /* HBA mode match */
3093 match = true;
3094 } else {
3095 /* Possible RAID mode -- check each phys dev. */
3096 /* FIXME: Do we need to take out a lock here? If
3097 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3098 * instead. */
3099 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3100 /* FIXME: an alternate test might be
3101 *
3102 * match = dev->phys_disk[i]->ioaccel_handle
3103 * == c2->scsi_nexus; */
3104 match = dev->phys_disk[i] == c->phys_disk;
3105 }
3106 }
3107 break;
3108
3109 case IOACCEL2_TMF:
3110 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3111 match = dev->phys_disk[i]->ioaccel_handle ==
3112 le32_to_cpu(ac->it_nexus);
3113 }
3114 break;
3115
3116 case 0: /* The command is in the middle of being initialized. */
3117 match = false;
3118 break;
3119
3120 default:
3121 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3122 c->cmd_type);
3123 BUG();
3124 }
3125
3126 return match;
3127}
3128
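/*
 * Carry out a device reset: mark every in-flight command addressed to
 * the device as having a reset pending, send the reset, then wait for
 * those commands to drain (or for a controller lockup) before checking
 * that the device is ready again.  Resets are serialized by
 * h->reset_mutex.
 */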
3129static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3130 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3131{
3132 int i;
3133 int rc = 0;
3134
3135 /* We can really only handle one reset at a time */
3136 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3137 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3138 return -EINTR;
3139 }
3140
3141 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3142
3143 for (i = 0; i < h->nr_cmds; i++) {
3144 struct CommandList *c = h->cmd_pool + i;
3145 int refcount = atomic_inc_return(&c->refcount);
3146
3147 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3148 unsigned long flags;
3149
3150 /*
3151 * Mark the target command as having a reset pending,
			 * then take h->lock so that the command cannot complete
3153 * while we're considering it. If the command is not
3154 * idle then count it; otherwise revoke the event.
3155 */
3156 c->reset_pending = dev;
3157 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
3158 if (!hpsa_is_cmd_idle(c))
3159 atomic_inc(&dev->reset_cmds_out);
3160 else
3161 c->reset_pending = NULL;
3162 spin_unlock_irqrestore(&h->lock, flags);
3163 }
3164
3165 cmd_free(h, c);
3166 }
3167
3168 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3169 if (!rc)
3170 wait_event(h->event_sync_wait_queue,
3171 atomic_read(&dev->reset_cmds_out) == 0 ||
3172 lockup_detected(h));
3173
3174 if (unlikely(lockup_detected(h))) {
3175 dev_warn(&h->pdev->dev,
3176 "Controller lockup detected during reset wait\n");
3177 rc = -ENODEV;
3178 }
3179
3180 if (unlikely(rc))
3181 atomic_set(&dev->reset_cmds_out, 0);
3182 else
3183 rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
3184
3185 mutex_unlock(&h->reset_mutex);
3186 return rc;
3187}
3188
3189static void hpsa_get_raid_level(struct ctlr_info *h,
3190 unsigned char *scsi3addr, unsigned char *raid_level)
3191{
3192 int rc;
3193 unsigned char *buf;
3194
3195 *raid_level = RAID_UNKNOWN;
3196 buf = kzalloc(64, GFP_KERNEL);
3197 if (!buf)
3198 return;
3199
3200 if (!hpsa_vpd_page_supported(h, scsi3addr,
3201 HPSA_VPD_LV_DEVICE_GEOMETRY))
3202 goto exit;
3203
3204 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3205 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3206
3207 if (rc == 0)
3208 *raid_level = buf[8];
3209 if (*raid_level > RAID_UNKNOWN)
3210 *raid_level = RAID_UNKNOWN;
3211exit:
3212 kfree(buf);
3213 return;
3214}
3215
3216#define HPSA_MAP_DEBUG
3217#ifdef HPSA_MAP_DEBUG
3218static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3219 struct raid_map_data *map_buff)
3220{
3221 struct raid_map_disk_data *dd = &map_buff->data[0];
3222 int map, row, col;
3223 u16 map_cnt, row_cnt, disks_per_row;
3224
3225 if (rc != 0)
3226 return;
3227
3228 /* Show details only if debugging has been activated. */
3229 if (h->raid_offload_debug < 2)
3230 return;
3231
3232 dev_info(&h->pdev->dev, "structure_size = %u\n",
3233 le32_to_cpu(map_buff->structure_size));
3234 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3235 le32_to_cpu(map_buff->volume_blk_size));
3236 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3237 le64_to_cpu(map_buff->volume_blk_cnt));
3238 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3239 map_buff->phys_blk_shift);
3240 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3241 map_buff->parity_rotation_shift);
3242 dev_info(&h->pdev->dev, "strip_size = %u\n",
3243 le16_to_cpu(map_buff->strip_size));
3244 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3245 le64_to_cpu(map_buff->disk_starting_blk));
3246 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3247 le64_to_cpu(map_buff->disk_blk_cnt));
3248 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3249 le16_to_cpu(map_buff->data_disks_per_row));
3250 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3251 le16_to_cpu(map_buff->metadata_disks_per_row));
3252 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3253 le16_to_cpu(map_buff->row_cnt));
3254 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3255 le16_to_cpu(map_buff->layout_map_count));
3256 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3257 le16_to_cpu(map_buff->flags));
3258 dev_info(&h->pdev->dev, "encryption = %s\n",
3259 le16_to_cpu(map_buff->flags) &
3260 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3261 dev_info(&h->pdev->dev, "dekindex = %u\n",
3262 le16_to_cpu(map_buff->dekindex));
3263 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3264 for (map = 0; map < map_cnt; map++) {
3265 dev_info(&h->pdev->dev, "Map%u:\n", map);
3266 row_cnt = le16_to_cpu(map_buff->row_cnt);
3267 for (row = 0; row < row_cnt; row++) {
3268 dev_info(&h->pdev->dev, " Row%u:\n", row);
3269 disks_per_row =
3270 le16_to_cpu(map_buff->data_disks_per_row);
3271 for (col = 0; col < disks_per_row; col++, dd++)
3272 dev_info(&h->pdev->dev,
3273 " D%02u: h=0x%04x xor=%u,%u\n",
3274 col, dd->ioaccel_handle,
3275 dd->xor_mult[0], dd->xor_mult[1]);
3276 disks_per_row =
3277 le16_to_cpu(map_buff->metadata_disks_per_row);
3278 for (col = 0; col < disks_per_row; col++, dd++)
3279 dev_info(&h->pdev->dev,
3280 " M%02u: h=0x%04x xor=%u,%u\n",
3281 col, dd->ioaccel_handle,
3282 dd->xor_mult[0], dd->xor_mult[1]);
3283 }
3284 }
3285}
3286#else
3287static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3288 __attribute__((unused)) int rc,
3289 __attribute__((unused)) struct raid_map_data *map_buff)
3290{
3291}
3292#endif
3293
3294static int hpsa_get_raid_map(struct ctlr_info *h,
3295 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3296{
3297 int rc = 0;
3298 struct CommandList *c;
3299 struct ErrorInfo *ei;
3300
3301 c = cmd_alloc(h);
3302
3303 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3304 sizeof(this_device->raid_map), 0,
3305 scsi3addr, TYPE_CMD)) {
3306 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3307 cmd_free(h, c);
3308 return -1;
3309 }
3310 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3311 NO_TIMEOUT);
3312 if (rc)
3313 goto out;
3314 ei = c->err_info;
3315 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3316 hpsa_scsi_interpret_error(h, c);
3317 rc = -1;
3318 goto out;
3319 }
3320 cmd_free(h, c);
3321
3322 /* @todo in the future, dynamically allocate RAID map memory */
3323 if (le32_to_cpu(this_device->raid_map.structure_size) >
3324 sizeof(this_device->raid_map)) {
3325 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3326 rc = -1;
3327 }
3328 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3329 return rc;
3330out:
3331 cmd_free(h, c);
3332 return rc;
3333}
3334
3335static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3336 unsigned char scsi3addr[], u16 bmic_device_index,
3337 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3338{
3339 int rc = IO_OK;
3340 struct CommandList *c;
3341 struct ErrorInfo *ei;
3342
3343 c = cmd_alloc(h);
3344
3345 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3346 0, RAID_CTLR_LUNID, TYPE_CMD);
3347 if (rc)
3348 goto out;
3349
3350 c->Request.CDB[2] = bmic_device_index & 0xff;
3351 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3352
3353 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3354 NO_TIMEOUT);
3355 if (rc)
3356 goto out;
3357 ei = c->err_info;
3358 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3359 hpsa_scsi_interpret_error(h, c);
3360 rc = -1;
3361 }
3362out:
3363 cmd_free(h, c);
3364 return rc;
3365}
3366
3367static int hpsa_bmic_id_controller(struct ctlr_info *h,
3368 struct bmic_identify_controller *buf, size_t bufsize)
3369{
3370 int rc = IO_OK;
3371 struct CommandList *c;
3372 struct ErrorInfo *ei;
3373
3374 c = cmd_alloc(h);
3375
3376 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3377 0, RAID_CTLR_LUNID, TYPE_CMD);
3378 if (rc)
3379 goto out;
3380
3381 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3382 NO_TIMEOUT);
3383 if (rc)
3384 goto out;
3385 ei = c->err_info;
3386 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3387 hpsa_scsi_interpret_error(h, c);
3388 rc = -1;
3389 }
3390out:
3391 cmd_free(h, c);
3392 return rc;
3393}
3394
3395static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3396 unsigned char scsi3addr[], u16 bmic_device_index,
3397 struct bmic_identify_physical_device *buf, size_t bufsize)
3398{
3399 int rc = IO_OK;
3400 struct CommandList *c;
3401 struct ErrorInfo *ei;
3402
3403 c = cmd_alloc(h);
3404 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3405 0, RAID_CTLR_LUNID, TYPE_CMD);
3406 if (rc)
3407 goto out;
3408
3409 c->Request.CDB[2] = bmic_device_index & 0xff;
3410 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3411
3412 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3413 NO_TIMEOUT);
3414 ei = c->