1 | /* |
2 | * Disk Array driver for HP Smart Array SAS controllers |
3 | * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries |
4 | * Copyright 2016 Microsemi Corporation |
5 | * Copyright 2014-2015 PMC-Sierra, Inc. |
6 | * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; version 2 of the License. |
11 | * |
12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
15 | * NON INFRINGEMENT. See the GNU General Public License for more details. |
16 | * |
17 | * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com |
18 | * |
19 | */ |
20 | |
21 | #include <linux/module.h> |
22 | #include <linux/interrupt.h> |
23 | #include <linux/types.h> |
24 | #include <linux/pci.h> |
25 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> |
27 | #include <linux/delay.h> |
28 | #include <linux/fs.h> |
29 | #include <linux/timer.h> |
30 | #include <linux/init.h> |
31 | #include <linux/spinlock.h> |
32 | #include <linux/compat.h> |
33 | #include <linux/blktrace_api.h> |
34 | #include <linux/uaccess.h> |
35 | #include <linux/io.h> |
36 | #include <linux/dma-mapping.h> |
37 | #include <linux/completion.h> |
38 | #include <linux/moduleparam.h> |
39 | #include <scsi/scsi.h> |
40 | #include <scsi/scsi_cmnd.h> |
41 | #include <scsi/scsi_device.h> |
42 | #include <scsi/scsi_host.h> |
43 | #include <scsi/scsi_tcq.h> |
44 | #include <scsi/scsi_eh.h> |
45 | #include <scsi/scsi_transport_sas.h> |
46 | #include <scsi/scsi_dbg.h> |
47 | #include <linux/cciss_ioctl.h> |
48 | #include <linux/string.h> |
49 | #include <linux/bitmap.h> |
50 | #include <linux/atomic.h> |
51 | #include <linux/jiffies.h> |
52 | #include <linux/percpu-defs.h> |
53 | #include <linux/percpu.h> |
54 | #include <asm/unaligned.h> |
55 | #include <asm/div64.h> |
56 | #include "hpsa_cmd.h" |
57 | #include "hpsa.h" |
58 | |
59 | /* |
 * HPSA_DRIVER_VERSION must consist of three byte values (0-255) separated
 * by '.', with an optional trailing '-' followed by a byte value (0-255).
62 | */ |
63 | #define HPSA_DRIVER_VERSION "3.4.20-200" |
64 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
65 | #define HPSA "hpsa" |
66 | |
67 | /* How long to wait for CISS doorbell communication */ |
68 | #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ |
69 | #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ |
70 | #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ |
71 | #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ |
72 | #define MAX_IOCTL_CONFIG_WAIT 1000 |
73 | |
/* define how many times we will try a command because of bus resets */
75 | #define MAX_CMD_RETRIES 3 |
76 | /* How long to wait before giving up on a command */ |
77 | #define HPSA_EH_PTRAID_TIMEOUT (240 * HZ) |
78 | |
79 | /* Embedded module documentation macros - see modules.h */ |
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");
86 | |
87 | static int hpsa_simple_mode; |
88 | module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); |
89 | MODULE_PARM_DESC(hpsa_simple_mode, |
	"Use 'simple mode' rather than 'performant mode'");
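/*
 * For example: load with "modprobe hpsa hpsa_simple_mode=1", or toggle at
 * runtime via /sys/module/hpsa/parameters/hpsa_simple_mode.
 */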
91 | |
92 | /* define the PCI info for the cards we can control */ |
93 | static const struct pci_device_id hpsa_pci_device_id[] = { |
94 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, |
95 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, |
96 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
97 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, |
98 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
99 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, |
100 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, |
101 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, |
102 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, |
103 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, |
104 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, |
105 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, |
106 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, |
107 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, |
108 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, |
109 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920}, |
110 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, |
111 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, |
112 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, |
113 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, |
114 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925}, |
115 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, |
116 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, |
117 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, |
118 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, |
119 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, |
120 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, |
121 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, |
122 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, |
123 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, |
124 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, |
125 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, |
126 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, |
127 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, |
128 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, |
129 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, |
130 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, |
131 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, |
132 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, |
133 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, |
134 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, |
135 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, |
136 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, |
137 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581}, |
138 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582}, |
139 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583}, |
140 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584}, |
141 | {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585}, |
142 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, |
143 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, |
144 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, |
145 | {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, |
146 | {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, |
147 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
148 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
149 | {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
150 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
151 | {0,} |
152 | }; |
153 | |
154 | MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); |
155 | |
156 | /* board_id = Subsystem Device ID & Vendor ID |
157 | * product = Marketing Name for the board |
158 | * access = Address of the struct of function pointers |
159 | */ |
160 | static struct board_type products[] = { |
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
235 | }; |
236 | |
237 | static struct scsi_transport_template *hpsa_sas_transport_template; |
238 | static int hpsa_add_sas_host(struct ctlr_info *h); |
239 | static void hpsa_delete_sas_host(struct ctlr_info *h); |
240 | static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, |
241 | struct hpsa_scsi_dev_t *device); |
242 | static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device); |
243 | static struct hpsa_scsi_dev_t |
244 | *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, |
245 | struct sas_rphy *rphy); |
246 | |
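/*
 * Sentinel scsi_cmnd pointers: a CommandList's scsi_cmd field is pointed
 * at one of these to mark the slot idle or busy rather than at a real
 * midlayer command (see hpsa_is_cmd_idle() below).
 */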
247 | #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) |
248 | static const struct scsi_cmnd hpsa_cmd_busy; |
249 | #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) |
250 | static const struct scsi_cmnd hpsa_cmd_idle; |
251 | static int number_of_controllers; |
252 | |
253 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
254 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
255 | static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, |
256 | void __user *arg); |
257 | static int hpsa_passthru_ioctl(struct ctlr_info *h, |
258 | IOCTL_Command_struct *iocommand); |
259 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, |
260 | BIG_IOCTL_Command_struct *ioc); |
261 | |
262 | #ifdef CONFIG_COMPAT |
263 | static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, |
264 | void __user *arg); |
265 | #endif |
266 | |
267 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); |
268 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
269 | static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); |
270 | static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, |
271 | struct scsi_cmnd *scmd); |
272 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
273 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
274 | int cmd_type); |
275 | static void hpsa_free_cmd_pool(struct ctlr_info *h); |
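/*
 * When OR'd into the page_code argument of fill_cmd(), VPD_PAGE requests a
 * Vital Product Data inquiry page rather than standard inquiry data (an
 * assumption based on how page_code is used; fill_cmd() is defined later
 * in this file).
 */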
276 | #define VPD_PAGE (1 << 8) |
277 | #define HPSA_SIMPLE_ERROR_BITS 0x03 |
278 | |
279 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
280 | static void hpsa_scan_start(struct Scsi_Host *); |
281 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
282 | unsigned long elapsed_time); |
283 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); |
284 | |
285 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
286 | static int hpsa_slave_alloc(struct scsi_device *sdev); |
287 | static int hpsa_slave_configure(struct scsi_device *sdev); |
288 | static void hpsa_slave_destroy(struct scsi_device *sdev); |
289 | |
290 | static void hpsa_update_scsi_devices(struct ctlr_info *h); |
291 | static int check_for_unit_attention(struct ctlr_info *h, |
292 | struct CommandList *c); |
293 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
294 | struct CommandList *c); |
295 | /* performant mode helper functions */ |
296 | static void calc_bucket_map(int *bucket, int num_buckets, |
297 | int nsgs, int min_blocks, u32 *bucket_map); |
298 | static void hpsa_free_performant_mode(struct ctlr_info *h); |
299 | static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); |
300 | static inline u32 next_command(struct ctlr_info *h, u8 q); |
301 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
302 | u32 *cfg_base_addr, u64 *cfg_base_addr_index, |
303 | u64 *cfg_offset); |
304 | static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
305 | unsigned long *memory_bar); |
306 | static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, |
307 | bool *legacy_board); |
308 | static int wait_for_device_to_become_ready(struct ctlr_info *h, |
309 | unsigned char lunaddr[], |
310 | int reply_queue); |
311 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
312 | int wait_for_ready); |
313 | static inline void finish_cmd(struct CommandList *c); |
314 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); |
315 | #define BOARD_NOT_READY 0 |
316 | #define BOARD_READY 1 |
317 | static void hpsa_drain_accel_commands(struct ctlr_info *h); |
318 | static void hpsa_flush_cache(struct ctlr_info *h); |
319 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
320 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
321 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); |
322 | static void hpsa_command_resubmit_worker(struct work_struct *work); |
323 | static u32 lockup_detected(struct ctlr_info *h); |
324 | static int detect_controller_lockup(struct ctlr_info *h); |
325 | static void hpsa_disable_rld_caching(struct ctlr_info *h); |
326 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
327 | struct ReportExtendedLUNdata *buf, int bufsize); |
328 | static bool hpsa_vpd_page_supported(struct ctlr_info *h, |
329 | unsigned char scsi3addr[], u8 page); |
330 | static int hpsa_luns_changed(struct ctlr_info *h); |
331 | static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, |
332 | struct hpsa_scsi_dev_t *dev, |
333 | unsigned char *scsi3addr); |
334 | |
335 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
336 | { |
	unsigned long *priv = shost_priv(sdev->host);
338 | return (struct ctlr_info *) *priv; |
339 | } |
340 | |
341 | static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) |
342 | { |
	unsigned long *priv = shost_priv(sh);
344 | return (struct ctlr_info *) *priv; |
345 | } |
346 | |
347 | static inline bool hpsa_is_cmd_idle(struct CommandList *c) |
348 | { |
349 | return c->scsi_cmd == SCSI_CMD_IDLE; |
350 | } |
351 | |
352 | /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ |
353 | static void decode_sense_data(const u8 *sense_data, int sense_data_len, |
354 | u8 *sense_key, u8 *asc, u8 *ascq) |
355 | { |
356 | struct scsi_sense_hdr sshdr; |
357 | bool rc; |
358 | |
359 | *sense_key = -1; |
360 | *asc = -1; |
361 | *ascq = -1; |
362 | |
363 | if (sense_data_len < 1) |
364 | return; |
365 | |
	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
367 | if (rc) { |
368 | *sense_key = sshdr.sense_key; |
369 | *asc = sshdr.asc; |
370 | *ascq = sshdr.ascq; |
371 | } |
372 | } |
373 | |
374 | static int check_for_unit_attention(struct ctlr_info *h, |
375 | struct CommandList *c) |
376 | { |
377 | u8 sense_key, asc, ascq; |
378 | int sense_len; |
379 | |
380 | if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) |
381 | sense_len = sizeof(c->err_info->SenseInfo); |
382 | else |
383 | sense_len = c->err_info->SenseLen; |
384 | |
	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
387 | if (sense_key != UNIT_ATTENTION || asc == 0xff) |
388 | return 0; |
389 | |
390 | switch (asc) { |
391 | case STATE_CHANGED: |
392 | dev_warn(&h->pdev->dev, |
			"%s: a state change detected, command retried\n",
394 | h->devname); |
395 | break; |
396 | case LUN_FAILED: |
397 | dev_warn(&h->pdev->dev, |
			"%s: LUN failure detected\n", h->devname);
399 | break; |
400 | case REPORT_LUNS_CHANGED: |
401 | dev_warn(&h->pdev->dev, |
			"%s: report LUN data changed\n", h->devname);
403 | /* |
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * external target (array) devices.
406 | */ |
407 | break; |
408 | case POWER_OR_RESET: |
409 | dev_warn(&h->pdev->dev, |
			"%s: a power on or device reset detected\n",
411 | h->devname); |
412 | break; |
413 | case UNIT_ATTENTION_CLEARED: |
414 | dev_warn(&h->pdev->dev, |
			"%s: unit attention cleared by another initiator\n",
416 | h->devname); |
417 | break; |
418 | default: |
419 | dev_warn(&h->pdev->dev, |
			"%s: unknown unit attention detected\n",
421 | h->devname); |
422 | break; |
423 | } |
424 | return 1; |
425 | } |
426 | |
427 | static int check_for_busy(struct ctlr_info *h, struct CommandList *c) |
428 | { |
429 | if (c->err_info->CommandStatus != CMD_TARGET_STATUS || |
430 | (c->err_info->ScsiStatus != SAM_STAT_BUSY && |
431 | c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) |
432 | return 0; |
	dev_warn(&h->pdev->dev, HPSA "device busy");
434 | return 1; |
435 | } |
436 | |
437 | static u32 lockup_detected(struct ctlr_info *h); |
438 | static ssize_t host_show_lockup_detected(struct device *dev, |
439 | struct device_attribute *attr, char *buf) |
440 | { |
441 | int ld; |
442 | struct ctlr_info *h; |
443 | struct Scsi_Host *shost = class_to_shost(dev); |
444 | |
	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
449 | } |
450 | |
451 | static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, |
452 | struct device_attribute *attr, |
453 | const char *buf, size_t count) |
454 | { |
455 | int status, len; |
456 | struct ctlr_info *h; |
457 | struct Scsi_Host *shost = class_to_shost(dev); |
458 | char tmpbuf[10]; |
459 | |
460 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
461 | return -EACCES; |
462 | len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; |
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
466 | return -EINVAL; |
	h = shost_to_hba(shost);
468 | h->acciopath_status = !!status; |
469 | dev_warn(&h->pdev->dev, |
470 | "hpsa: HP SSD Smart Path %s via sysfs update.\n" , |
		h->acciopath_status ? "enabled" : "disabled");
472 | return count; |
473 | } |
474 | |
475 | static ssize_t host_store_raid_offload_debug(struct device *dev, |
476 | struct device_attribute *attr, |
477 | const char *buf, size_t count) |
478 | { |
479 | int debug_level, len; |
480 | struct ctlr_info *h; |
481 | struct Scsi_Host *shost = class_to_shost(dev); |
482 | char tmpbuf[10]; |
483 | |
484 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
485 | return -EACCES; |
486 | len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; |
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
490 | return -EINVAL; |
491 | if (debug_level < 0) |
492 | debug_level = 0; |
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
496 | h->raid_offload_debug); |
497 | return count; |
498 | } |
499 | |
500 | static ssize_t host_store_rescan(struct device *dev, |
501 | struct device_attribute *attr, |
502 | const char *buf, size_t count) |
503 | { |
504 | struct ctlr_info *h; |
505 | struct Scsi_Host *shost = class_to_shost(dev); |
	h = shost_to_hba(shost);
507 | hpsa_scan_start(h->scsi_host); |
508 | return count; |
509 | } |
510 | |
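/*
 * Disable HP SSD Smart Path (ioaccel) for a device: clear both the
 * current setting and the one staged for the next device update.
 */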
511 | static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) |
512 | { |
513 | device->offload_enabled = 0; |
514 | device->offload_to_be_enabled = 0; |
515 | } |
516 | |
517 | static ssize_t host_show_firmware_revision(struct device *dev, |
518 | struct device_attribute *attr, char *buf) |
519 | { |
520 | struct ctlr_info *h; |
521 | struct Scsi_Host *shost = class_to_shost(dev); |
522 | unsigned char *fwrev; |
523 | |
	h = shost_to_hba(shost);
525 | if (!h->hba_inquiry_data) |
526 | return 0; |
527 | fwrev = &h->hba_inquiry_data[32]; |
	return snprintf(buf, 20, "%c%c%c%c\n",
529 | fwrev[0], fwrev[1], fwrev[2], fwrev[3]); |
530 | } |
531 | |
532 | static ssize_t host_show_commands_outstanding(struct device *dev, |
533 | struct device_attribute *attr, char *buf) |
534 | { |
535 | struct Scsi_Host *shost = class_to_shost(dev); |
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
540 | } |
541 | |
542 | static ssize_t host_show_transport_mode(struct device *dev, |
543 | struct device_attribute *attr, char *buf) |
544 | { |
545 | struct ctlr_info *h; |
546 | struct Scsi_Host *shost = class_to_shost(dev); |
547 | |
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
552 | } |
553 | |
554 | static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, |
555 | struct device_attribute *attr, char *buf) |
556 | { |
557 | struct ctlr_info *h; |
558 | struct Scsi_Host *shost = class_to_shost(dev); |
559 | |
	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
563 | } |
564 | |
565 | /* List of controllers which cannot be hard reset on kexec with reset_devices */ |
566 | static u32 unresettable_controller[] = { |
567 | 0x324a103C, /* Smart Array P712m */ |
568 | 0x324b103C, /* Smart Array P711m */ |
569 | 0x3223103C, /* Smart Array P800 */ |
570 | 0x3234103C, /* Smart Array P400 */ |
571 | 0x3235103C, /* Smart Array P400i */ |
572 | 0x3211103C, /* Smart Array E200i */ |
573 | 0x3212103C, /* Smart Array E200 */ |
574 | 0x3213103C, /* Smart Array E200i */ |
575 | 0x3214103C, /* Smart Array E200i */ |
576 | 0x3215103C, /* Smart Array E200i */ |
577 | 0x3237103C, /* Smart Array E500 */ |
578 | 0x323D103C, /* Smart Array P700m */ |
579 | 0x40800E11, /* Smart Array 5i */ |
580 | 0x409C0E11, /* Smart Array 6400 */ |
581 | 0x409D0E11, /* Smart Array 6400 EM */ |
582 | 0x40700E11, /* Smart Array 5300 */ |
583 | 0x40820E11, /* Smart Array 532 */ |
584 | 0x40830E11, /* Smart Array 5312 */ |
585 | 0x409A0E11, /* Smart Array 641 */ |
586 | 0x409B0E11, /* Smart Array 642 */ |
587 | 0x40910E11, /* Smart Array 6i */ |
588 | }; |
589 | |
590 | /* List of controllers which cannot even be soft reset */ |
591 | static u32 soft_unresettable_controller[] = { |
592 | 0x40800E11, /* Smart Array 5i */ |
593 | 0x40700E11, /* Smart Array 5300 */ |
594 | 0x40820E11, /* Smart Array 532 */ |
595 | 0x40830E11, /* Smart Array 5312 */ |
596 | 0x409A0E11, /* Smart Array 641 */ |
597 | 0x409B0E11, /* Smart Array 642 */ |
598 | 0x40910E11, /* Smart Array 6i */ |
599 | /* Exclude 640x boards. These are two pci devices in one slot |
600 | * which share a battery backed cache module. One controls the |
601 | * cache, the other accesses the cache through the one that controls |
602 | * it. If we reset the one controlling the cache, the other will |
603 | * likely not be happy. Just forbid resetting this conjoined mess. |
604 | * The 640x isn't really supported by hpsa anyway. |
605 | */ |
606 | 0x409C0E11, /* Smart Array 6400 */ |
607 | 0x409D0E11, /* Smart Array 6400 EM */ |
608 | }; |
609 | |
610 | static int board_id_in_array(u32 a[], int nelems, u32 board_id) |
611 | { |
612 | int i; |
613 | |
614 | for (i = 0; i < nelems; i++) |
615 | if (a[i] == board_id) |
616 | return 1; |
617 | return 0; |
618 | } |
619 | |
620 | static int ctlr_is_hard_resettable(u32 board_id) |
621 | { |
	return !board_id_in_array(unresettable_controller,
623 | ARRAY_SIZE(unresettable_controller), board_id); |
624 | } |
625 | |
626 | static int ctlr_is_soft_resettable(u32 board_id) |
627 | { |
	return !board_id_in_array(soft_unresettable_controller,
629 | ARRAY_SIZE(soft_unresettable_controller), board_id); |
630 | } |
631 | |
632 | static int ctlr_is_resettable(u32 board_id) |
633 | { |
634 | return ctlr_is_hard_resettable(board_id) || |
635 | ctlr_is_soft_resettable(board_id); |
636 | } |
637 | |
638 | static ssize_t host_show_resettable(struct device *dev, |
639 | struct device_attribute *attr, char *buf) |
640 | { |
641 | struct ctlr_info *h; |
642 | struct Scsi_Host *shost = class_to_shost(dev); |
643 | |
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
646 | } |
647 | |
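/*
 * In the CISS 8-byte LUN address, bits 7:6 of byte 3 select the
 * addressing mode; 01b (0x40) indicates a logical volume.
 */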
648 | static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) |
649 | { |
650 | return (scsi3addr[3] & 0xC0) == 0x40; |
651 | } |
652 | |
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
655 | }; |
656 | #define HPSA_RAID_0 0 |
657 | #define HPSA_RAID_4 1 |
658 | #define HPSA_RAID_1 2 /* also used for RAID 10 */ |
659 | #define HPSA_RAID_5 3 /* also used for RAID 50 */ |
660 | #define HPSA_RAID_51 4 |
661 | #define HPSA_RAID_6 5 /* also used for RAID 60 */ |
662 | #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ |
663 | #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2) |
664 | #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1) |
665 | |
666 | static inline bool is_logical_device(struct hpsa_scsi_dev_t *device) |
667 | { |
668 | return !device->physical_device; |
669 | } |
670 | |
671 | static ssize_t raid_level_show(struct device *dev, |
672 | struct device_attribute *attr, char *buf) |
673 | { |
674 | ssize_t l = 0; |
675 | unsigned char rlevel; |
676 | struct ctlr_info *h; |
677 | struct scsi_device *sdev; |
678 | struct hpsa_scsi_dev_t *hdev; |
679 | unsigned long flags; |
680 | |
681 | sdev = to_scsi_device(dev); |
682 | h = sdev_to_hba(sdev); |
683 | spin_lock_irqsave(&h->lock, flags); |
684 | hdev = sdev->hostdata; |
685 | if (!hdev) { |
		spin_unlock_irqrestore(&h->lock, flags);
687 | return -ENODEV; |
688 | } |
689 | |
690 | /* Is this even a logical drive? */ |
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
694 | return l; |
695 | } |
696 | |
697 | rlevel = hdev->raid_level; |
	spin_unlock_irqrestore(&h->lock, flags);
699 | if (rlevel > RAID_UNKNOWN) |
700 | rlevel = RAID_UNKNOWN; |
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
702 | return l; |
703 | } |
704 | |
705 | static ssize_t lunid_show(struct device *dev, |
706 | struct device_attribute *attr, char *buf) |
707 | { |
708 | struct ctlr_info *h; |
709 | struct scsi_device *sdev; |
710 | struct hpsa_scsi_dev_t *hdev; |
711 | unsigned long flags; |
712 | unsigned char lunid[8]; |
713 | |
714 | sdev = to_scsi_device(dev); |
715 | h = sdev_to_hba(sdev); |
716 | spin_lock_irqsave(&h->lock, flags); |
717 | hdev = sdev->hostdata; |
718 | if (!hdev) { |
		spin_unlock_irqrestore(&h->lock, flags);
720 | return -ENODEV; |
721 | } |
722 | memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); |
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
725 | } |
726 | |
727 | static ssize_t unique_id_show(struct device *dev, |
728 | struct device_attribute *attr, char *buf) |
729 | { |
730 | struct ctlr_info *h; |
731 | struct scsi_device *sdev; |
732 | struct hpsa_scsi_dev_t *hdev; |
733 | unsigned long flags; |
734 | unsigned char sn[16]; |
735 | |
736 | sdev = to_scsi_device(dev); |
737 | h = sdev_to_hba(sdev); |
738 | spin_lock_irqsave(&h->lock, flags); |
739 | hdev = sdev->hostdata; |
740 | if (!hdev) { |
		spin_unlock_irqrestore(&h->lock, flags);
742 | return -ENODEV; |
743 | } |
744 | memcpy(sn, hdev->device_id, sizeof(sn)); |
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
749 | sn[0], sn[1], sn[2], sn[3], |
750 | sn[4], sn[5], sn[6], sn[7], |
751 | sn[8], sn[9], sn[10], sn[11], |
752 | sn[12], sn[13], sn[14], sn[15]); |
753 | } |
754 | |
755 | static ssize_t sas_address_show(struct device *dev, |
756 | struct device_attribute *attr, char *buf) |
757 | { |
758 | struct ctlr_info *h; |
759 | struct scsi_device *sdev; |
760 | struct hpsa_scsi_dev_t *hdev; |
761 | unsigned long flags; |
762 | u64 sas_address; |
763 | |
764 | sdev = to_scsi_device(dev); |
765 | h = sdev_to_hba(sdev); |
766 | spin_lock_irqsave(&h->lock, flags); |
767 | hdev = sdev->hostdata; |
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
770 | return -ENODEV; |
771 | } |
772 | sas_address = hdev->sas_address; |
	spin_unlock_irqrestore(&h->lock, flags);
774 | |
	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
776 | } |
777 | |
778 | static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, |
779 | struct device_attribute *attr, char *buf) |
780 | { |
781 | struct ctlr_info *h; |
782 | struct scsi_device *sdev; |
783 | struct hpsa_scsi_dev_t *hdev; |
784 | unsigned long flags; |
785 | int offload_enabled; |
786 | |
787 | sdev = to_scsi_device(dev); |
788 | h = sdev_to_hba(sdev); |
789 | spin_lock_irqsave(&h->lock, flags); |
790 | hdev = sdev->hostdata; |
791 | if (!hdev) { |
		spin_unlock_irqrestore(&h->lock, flags);
793 | return -ENODEV; |
794 | } |
795 | offload_enabled = hdev->offload_enabled; |
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
803 | } |
804 | |
805 | #define MAX_PATHS 8 |
806 | static ssize_t path_info_show(struct device *dev, |
807 | struct device_attribute *attr, char *buf) |
808 | { |
809 | struct ctlr_info *h; |
810 | struct scsi_device *sdev; |
811 | struct hpsa_scsi_dev_t *hdev; |
812 | unsigned long flags; |
813 | int i; |
814 | int output_len = 0; |
815 | u8 box; |
816 | u8 bay; |
817 | u8 path_map_index = 0; |
818 | char *active; |
819 | unsigned char phys_connector[2]; |
820 | |
821 | sdev = to_scsi_device(dev); |
822 | h = sdev_to_hba(sdev); |
823 | spin_lock_irqsave(&h->devlock, flags); |
824 | hdev = sdev->hostdata; |
825 | if (!hdev) { |
		spin_unlock_irqrestore(&h->devlock, flags);
827 | return -ENODEV; |
828 | } |
829 | |
830 | bay = hdev->bay; |
831 | for (i = 0; i < MAX_PATHS; i++) { |
832 | path_map_index = 1<<i; |
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"%s\n", active);
851 | continue; |
852 | } |
853 | |
854 | box = hdev->box[i]; |
855 | memcpy(&phys_connector, &hdev->phys_connector[i], |
856 | sizeof(phys_connector)); |
857 | if (phys_connector[0] < '0') |
858 | phys_connector[0] = '0'; |
859 | if (phys_connector[1] < '0') |
860 | phys_connector[1] = '0'; |
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
865 | if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) && |
866 | hdev->expose_device) { |
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
885 | } |
886 | |
	spin_unlock_irqrestore(&h->devlock, flags);
888 | return output_len; |
889 | } |
890 | |
891 | static ssize_t host_show_ctlr_num(struct device *dev, |
892 | struct device_attribute *attr, char *buf) |
893 | { |
894 | struct ctlr_info *h; |
895 | struct Scsi_Host *shost = class_to_shost(dev); |
896 | |
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
899 | } |
900 | |
901 | static ssize_t host_show_legacy_board(struct device *dev, |
902 | struct device_attribute *attr, char *buf) |
903 | { |
904 | struct ctlr_info *h; |
905 | struct Scsi_Host *shost = class_to_shost(dev); |
906 | |
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
909 | } |
910 | |
911 | static DEVICE_ATTR_RO(raid_level); |
912 | static DEVICE_ATTR_RO(lunid); |
913 | static DEVICE_ATTR_RO(unique_id); |
914 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); |
915 | static DEVICE_ATTR_RO(sas_address); |
916 | static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, |
917 | host_show_hp_ssd_smart_path_enabled, NULL); |
918 | static DEVICE_ATTR_RO(path_info); |
919 | static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, |
920 | host_show_hp_ssd_smart_path_status, |
921 | host_store_hp_ssd_smart_path_status); |
922 | static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, |
923 | host_store_raid_offload_debug); |
924 | static DEVICE_ATTR(firmware_revision, S_IRUGO, |
925 | host_show_firmware_revision, NULL); |
926 | static DEVICE_ATTR(commands_outstanding, S_IRUGO, |
927 | host_show_commands_outstanding, NULL); |
928 | static DEVICE_ATTR(transport_mode, S_IRUGO, |
929 | host_show_transport_mode, NULL); |
930 | static DEVICE_ATTR(resettable, S_IRUGO, |
931 | host_show_resettable, NULL); |
932 | static DEVICE_ATTR(lockup_detected, S_IRUGO, |
933 | host_show_lockup_detected, NULL); |
934 | static DEVICE_ATTR(ctlr_num, S_IRUGO, |
935 | host_show_ctlr_num, NULL); |
936 | static DEVICE_ATTR(legacy_board, S_IRUGO, |
937 | host_show_legacy_board, NULL); |
938 | |
939 | static struct attribute *hpsa_sdev_attrs[] = { |
940 | &dev_attr_raid_level.attr, |
941 | &dev_attr_lunid.attr, |
942 | &dev_attr_unique_id.attr, |
943 | &dev_attr_hp_ssd_smart_path_enabled.attr, |
944 | &dev_attr_path_info.attr, |
945 | &dev_attr_sas_address.attr, |
946 | NULL, |
947 | }; |
948 | |
949 | ATTRIBUTE_GROUPS(hpsa_sdev); |
950 | |
951 | static struct attribute *hpsa_shost_attrs[] = { |
952 | &dev_attr_rescan.attr, |
953 | &dev_attr_firmware_revision.attr, |
954 | &dev_attr_commands_outstanding.attr, |
955 | &dev_attr_transport_mode.attr, |
956 | &dev_attr_resettable.attr, |
957 | &dev_attr_hp_ssd_smart_path_status.attr, |
958 | &dev_attr_raid_offload_debug.attr, |
959 | &dev_attr_lockup_detected.attr, |
960 | &dev_attr_ctlr_num.attr, |
961 | &dev_attr_legacy_board.attr, |
962 | NULL, |
963 | }; |
964 | |
965 | ATTRIBUTE_GROUPS(hpsa_shost); |
966 | |
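/*
 * Command slots held back from the SCSI midlayer: some are reserved for
 * driver-internal commands, the rest for concurrent passthru ioctls.
 */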
967 | #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ |
968 | HPSA_MAX_CONCURRENT_PASSTHRUS) |
969 | |
970 | static const struct scsi_host_template hpsa_driver_template = { |
971 | .module = THIS_MODULE, |
972 | .name = HPSA, |
973 | .proc_name = HPSA, |
974 | .queuecommand = hpsa_scsi_queue_command, |
975 | .scan_start = hpsa_scan_start, |
976 | .scan_finished = hpsa_scan_finished, |
977 | .change_queue_depth = hpsa_change_queue_depth, |
978 | .this_id = -1, |
979 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
980 | .ioctl = hpsa_ioctl, |
981 | .slave_alloc = hpsa_slave_alloc, |
982 | .slave_configure = hpsa_slave_configure, |
983 | .slave_destroy = hpsa_slave_destroy, |
984 | #ifdef CONFIG_COMPAT |
985 | .compat_ioctl = hpsa_compat_ioctl, |
986 | #endif |
987 | .sdev_groups = hpsa_sdev_groups, |
988 | .shost_groups = hpsa_shost_groups, |
989 | .max_sectors = 2048, |
990 | .no_write_same = 1, |
991 | }; |
992 | |
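/*
 * Pull the tag of the next completed command off reply queue q. In
 * performant mode the reply queue is consumed in place, using a toggling
 * wraparound bit to distinguish new entries from stale ones; the other
 * transport methods delegate to the controller-specific
 * command_completed hook.
 */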
993 | static inline u32 next_command(struct ctlr_info *h, u8 q) |
994 | { |
995 | u32 a; |
996 | struct reply_queue_buffer *rq = &h->reply_queue[q]; |
997 | |
998 | if (h->transMethod & CFGTBL_Trans_io_accel1) |
999 | return h->access.command_completed(h, q); |
1000 | |
1001 | if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) |
1002 | return h->access.command_completed(h, q); |
1003 | |
1004 | if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { |
1005 | a = rq->head[rq->current_entry]; |
1006 | rq->current_entry++; |
		atomic_dec(&h->commands_outstanding);
1008 | } else { |
1009 | a = FIFO_EMPTY; |
1010 | } |
1011 | /* Check for wraparound */ |
1012 | if (rq->current_entry == h->max_commands) { |
1013 | rq->current_entry = 0; |
1014 | rq->wraparound ^= 1; |
1015 | } |
1016 | return a; |
1017 | } |
1018 | |
1019 | /* |
1020 | * There are some special bits in the bus address of the |
1021 | * command that we have to set for the controller to know |
1022 | * how to process the command: |
1023 | * |
1024 | * Normal performant mode: |
1025 | * bit 0: 1 means performant mode, 0 means simple mode. |
1026 | * bits 1-3 = block fetch table entry |
1027 | * bits 4-6 = command type (== 0) |
1028 | * |
1029 | * ioaccel1 mode: |
1030 | * bit 0 = "performant mode" bit. |
1031 | * bits 1-3 = block fetch table entry |
1032 | * bits 4-6 = command type (== 110) |
1033 | * (command type is needed because ioaccel1 mode |
1034 | * commands are submitted through the same register as normal |
1035 | * mode commands, so this is how the controller knows whether |
1036 | * the command is normal mode or ioaccel1 mode.) |
1037 | * |
1038 | * ioaccel2 mode: |
1039 | * bit 0 = "performant mode" bit. |
1040 | * bits 1-4 = block fetch table entry (note extra bit) |
1041 | * bits 4-6 = not needed, because ioaccel2 mode has |
1042 | * a separate special register for submitting commands. |
1043 | */ |
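
/*
 * Worked example (performant mode): if the block fetch table yields 3 for
 * a command, set_performant_mode() below ORs (3 << 1) | 1 = 0x07 into
 * c->busaddr: bit 0 marks performant mode, bits 1-3 carry the block
 * fetch value.
 */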
1044 | |
1045 | /* |
1046 | * set_performant_mode: Modify the tag for cciss performant |
1047 | * set bit 0 for pull model, bits 3-1 for block fetch |
1048 | * register number |
1049 | */ |
1050 | #define DEFAULT_REPLY_QUEUE (-1) |
1051 | static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, |
1052 | int reply_queue) |
1053 | { |
1054 | if (likely(h->transMethod & CFGTBL_Trans_Performant)) { |
1055 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); |
1056 | if (unlikely(!h->msix_vectors)) |
1057 | return; |
1058 | c->Header.ReplyQueue = reply_queue; |
1059 | } |
1060 | } |
1061 | |
1062 | static void set_ioaccel1_performant_mode(struct ctlr_info *h, |
1063 | struct CommandList *c, |
1064 | int reply_queue) |
1065 | { |
1066 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; |
1067 | |
1068 | /* |
1069 | * Tell the controller to post the reply to the queue for this |
1070 | * processor. This seems to give the best I/O throughput. |
1071 | */ |
1072 | cp->ReplyQueue = reply_queue; |
1073 | /* |
1074 | * Set the bits in the address sent down to include: |
1075 | * - performant mode bit (bit 0) |
1076 | * - pull count (bits 1-3) |
1077 | * - command type (bits 4-6) |
1078 | */ |
1079 | c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | |
1080 | IOACCEL1_BUSADDR_CMDTYPE; |
1081 | } |
1082 | |
1083 | static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, |
1084 | struct CommandList *c, |
1085 | int reply_queue) |
1086 | { |
1087 | struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) |
1088 | &h->ioaccel2_cmd_pool[c->cmdindex]; |
1089 | |
1090 | /* Tell the controller to post the reply to the queue for this |
1091 | * processor. This seems to give the best I/O throughput. |
1092 | */ |
1093 | cp->reply_queue = reply_queue; |
1094 | /* Set the bits in the address sent down to include: |
1095 | * - performant mode bit not used in ioaccel mode 2 |
1096 | * - pull count (bits 0-3) |
1097 | * - command type isn't needed for ioaccel2 |
1098 | */ |
1099 | c->busaddr |= h->ioaccel2_blockFetchTable[0]; |
1100 | } |
1101 | |
1102 | static void set_ioaccel2_performant_mode(struct ctlr_info *h, |
1103 | struct CommandList *c, |
1104 | int reply_queue) |
1105 | { |
1106 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; |
1107 | |
1108 | /* |
1109 | * Tell the controller to post the reply to the queue for this |
1110 | * processor. This seems to give the best I/O throughput. |
1111 | */ |
1112 | cp->reply_queue = reply_queue; |
1113 | /* |
1114 | * Set the bits in the address sent down to include: |
1115 | * - performant mode bit not used in ioaccel mode 2 |
1116 | * - pull count (bits 0-3) |
1117 | * - command type isn't needed for ioaccel2 |
1118 | */ |
1119 | c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); |
1120 | } |
1121 | |
1122 | static int is_firmware_flash_cmd(u8 *cdb) |
1123 | { |
1124 | return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; |
1125 | } |
1126 | |
1127 | /* |
1128 | * During firmware flash, the heartbeat register may not update as frequently |
 * as it should. So we dial down lockup detection during firmware flash, and
1130 | * dial it back up when firmware flash completes. |
1131 | */ |
1132 | #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) |
1133 | #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) |
1134 | #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) |
1135 | static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, |
1136 | struct CommandList *c) |
1137 | { |
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1142 | } |
1143 | |
1144 | static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, |
1145 | struct CommandList *c) |
1146 | { |
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
1149 | h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; |
1150 | } |
1151 | |
1152 | static void __enqueue_cmd_and_start_io(struct ctlr_info *h, |
1153 | struct CommandList *c, int reply_queue) |
1154 | { |
1155 | dial_down_lockup_detection_during_fw_flash(h, c); |
	atomic_inc(&h->commands_outstanding);
1157 | /* |
1158 | * Check to see if the command is being retried. |
1159 | */ |
1160 | if (c->device && !c->retry_pending) |
		atomic_inc(&c->device->commands_outstanding);
1162 | |
	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
1181 | } |
1182 | |
1183 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) |
1184 | { |
1185 | __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); |
1186 | } |
1187 | |
1188 | static inline int is_hba_lunid(unsigned char scsi3addr[]) |
1189 | { |
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1191 | } |
1192 | |
1193 | static inline int is_scsi_rev_5(struct ctlr_info *h) |
1194 | { |
1195 | if (!h->hba_inquiry_data) |
1196 | return 0; |
1197 | if ((h->hba_inquiry_data[2] & 0x07) == 5) |
1198 | return 1; |
1199 | return 0; |
1200 | } |
1201 | |
1202 | static int hpsa_find_target_lun(struct ctlr_info *h, |
1203 | unsigned char scsi3addr[], int bus, int *target, int *lun) |
1204 | { |
1205 | /* finds an unused bus, target, lun for a new physical device |
1206 | * assumes h->devlock is held |
1207 | */ |
1208 | int i, found = 0; |
1209 | DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); |
1210 | |
	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1212 | |
1213 | for (i = 0; i < h->ndevices; i++) { |
1214 | if (h->dev[i]->bus == bus && h->dev[i]->target != -1) |
1215 | __set_bit(h->dev[i]->target, lun_taken); |
1216 | } |
1217 | |
	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1219 | if (i < HPSA_MAX_DEVICES) { |
1220 | /* *bus = 1; */ |
1221 | *target = i; |
1222 | *lun = 0; |
1223 | found = 1; |
1224 | } |
1225 | return !found; |
1226 | } |
1227 | |
1228 | static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, |
1229 | struct hpsa_scsi_dev_t *dev, char *description) |
1230 | { |
1231 | #define LABEL_SIZE 25 |
1232 | char label[LABEL_SIZE]; |
1233 | |
1234 | if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) |
1235 | return; |
1236 | |
1237 | switch (dev->devtype) { |
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
1268 | } |
1269 | |
1270 | dev_printk(level, &h->pdev->dev, |
		"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1272 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun, |
1273 | description, |
1274 | scsi_device_type(dev->devtype), |
1275 | dev->vendor, |
1276 | dev->model, |
1277 | label, |
1278 | dev->offload_config ? '+' : '-', |
1279 | dev->offload_to_be_enabled ? '+' : '-', |
1280 | dev->expose_device); |
1281 | } |
1282 | |
1283 | /* Add an entry into h->dev[] array. */ |
1284 | static int hpsa_scsi_add_entry(struct ctlr_info *h, |
1285 | struct hpsa_scsi_dev_t *device, |
1286 | struct hpsa_scsi_dev_t *added[], int *nadded) |
1287 | { |
1288 | /* assumes h->devlock is held */ |
1289 | int n = h->ndevices; |
1290 | int i; |
1291 | unsigned char addr1[8], addr2[8]; |
1292 | struct hpsa_scsi_dev_t *sd; |
1293 | |
1294 | if (n >= HPSA_MAX_DEVICES) { |
1295 | dev_err(&h->pdev->dev, "too many devices, some will be " |
			"inaccessible.\n");
1297 | return -1; |
1298 | } |
1299 | |
1300 | /* physical devices do not have lun or target assigned until now. */ |
1301 | if (device->lun != -1) |
1302 | /* Logical device, lun is already assigned. */ |
1303 | goto lun_assigned; |
1304 | |
	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
1309 | if (device->scsi3addr[4] == 0) { |
1310 | /* This is not a non-zero lun of a multi-lun device */ |
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
1313 | return -1; |
1314 | goto lun_assigned; |
1315 | } |
1316 | |
1317 | /* This is a non-zero lun of a multi-lun device. |
1318 | * Search through our list and find the device which |
	 * has the same 8 byte LUN address, excepting bytes 4 and 5.
1320 | * Assign the same bus and target for this new LUN. |
1321 | * Use the logical unit number from the firmware. |
1322 | */ |
1323 | memcpy(addr1, device->scsi3addr, 8); |
1324 | addr1[4] = 0; |
1325 | addr1[5] = 0; |
1326 | for (i = 0; i < n; i++) { |
1327 | sd = h->dev[i]; |
1328 | memcpy(addr2, sd->scsi3addr, 8); |
1329 | addr2[4] = 0; |
1330 | addr2[5] = 0; |
1331 | /* differ only in byte 4 and 5? */ |
		if (memcmp(addr1, addr2, 8) == 0) {
1333 | device->bus = sd->bus; |
1334 | device->target = sd->target; |
1335 | device->lun = device->scsi3addr[4]; |
1336 | break; |
1337 | } |
1338 | } |
1339 | if (device->lun == -1) { |
1340 | dev_warn(&h->pdev->dev, "physical device with no LUN=0," |
1341 | " suspect firmware bug or unsupported hardware " |
			"configuration.\n");
1343 | return -1; |
1344 | } |
1345 | |
1346 | lun_assigned: |
1347 | |
1348 | h->dev[n] = device; |
1349 | h->ndevices++; |
1350 | added[*nadded] = device; |
1351 | (*nadded)++; |
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
1354 | return 0; |
1355 | } |
1356 | |
1357 | /* |
1358 | * Called during a scan operation. |
1359 | * |
1360 | * Update an entry in h->dev[] array. |
1361 | */ |
1362 | static void hpsa_scsi_update_entry(struct ctlr_info *h, |
1363 | int entry, struct hpsa_scsi_dev_t *new_entry) |
1364 | { |
1365 | /* assumes h->devlock is held */ |
1366 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
1367 | |
1368 | /* Raid level changed. */ |
1369 | h->dev[entry]->raid_level = new_entry->raid_level; |
1370 | |
1371 | /* |
	 * ioaccel_handle may have changed for a dual domain disk
1373 | */ |
1374 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; |
1375 | |
1376 | /* Raid offload parameters changed. Careful about the ordering. */ |
1377 | if (new_entry->offload_config && new_entry->offload_to_be_enabled) { |
1378 | /* |
1379 | * if drive is newly offload_enabled, we want to copy the |
1380 | * raid map data first. If previously offload_enabled and |
1381 | * offload_config were set, raid map data had better be |
1382 | * the same as it was before. If raid map data has changed |
1383 | * then it had better be the case that |
1384 | * h->dev[entry]->offload_enabled is currently 0. |
1385 | */ |
1386 | h->dev[entry]->raid_map = new_entry->raid_map; |
1387 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; |
1388 | } |
1389 | if (new_entry->offload_to_be_enabled) { |
1390 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; |
1391 | wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ |
1392 | } |
1393 | h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; |
1394 | h->dev[entry]->offload_config = new_entry->offload_config; |
1395 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; |
1396 | h->dev[entry]->queue_depth = new_entry->queue_depth; |
1397 | |
1398 | /* |
1399 | * We can turn off ioaccel offload now, but need to delay turning |
1400 | * ioaccel on until we can update h->dev[entry]->phys_disk[], but we |
1401 | * can't do that until all the devices are updated. |
1402 | */ |
1403 | h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled; |
1404 | |
1405 | /* |
1406 | * turn ioaccel off immediately if told to do so. |
1407 | */ |
1408 | if (!new_entry->offload_to_be_enabled) |
1409 | h->dev[entry]->offload_enabled = 0; |
1410 | |
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1412 | } |
1413 | |
1414 | /* Replace an entry from h->dev[] array. */ |
1415 | static void hpsa_scsi_replace_entry(struct ctlr_info *h, |
1416 | int entry, struct hpsa_scsi_dev_t *new_entry, |
1417 | struct hpsa_scsi_dev_t *added[], int *nadded, |
1418 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
1419 | { |
1420 | /* assumes h->devlock is held */ |
1421 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
1422 | removed[*nremoved] = h->dev[entry]; |
1423 | (*nremoved)++; |
1424 | |
1425 | /* |
1426 | * New physical devices won't have target/lun assigned yet |
1427 | * so we need to preserve the values in the slot we are replacing. |
1428 | */ |
1429 | if (new_entry->target == -1) { |
1430 | new_entry->target = h->dev[entry]->target; |
1431 | new_entry->lun = h->dev[entry]->lun; |
1432 | } |
1433 | |
1434 | h->dev[entry] = new_entry; |
1435 | added[*nadded] = new_entry; |
1436 | (*nadded)++; |
1437 | |
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1439 | } |
1440 | |
1441 | /* Remove an entry from h->dev[] array. */ |
1442 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, |
1443 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
1444 | { |
1445 | /* assumes h->devlock is held */ |
1446 | int i; |
1447 | struct hpsa_scsi_dev_t *sd; |
1448 | |
1449 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
1450 | |
1451 | sd = h->dev[entry]; |
1452 | removed[*nremoved] = h->dev[entry]; |
1453 | (*nremoved)++; |
1454 | |
1455 | for (i = entry; i < h->ndevices-1; i++) |
1456 | h->dev[i] = h->dev[i+1]; |
1457 | h->ndevices--; |
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1459 | } |
1460 | |
1461 | #define SCSI3ADDR_EQ(a, b) ( \ |
1462 | (a)[7] == (b)[7] && \ |
1463 | (a)[6] == (b)[6] && \ |
1464 | (a)[5] == (b)[5] && \ |
1465 | (a)[4] == (b)[4] && \ |
1466 | (a)[3] == (b)[3] && \ |
1467 | (a)[2] == (b)[2] && \ |
1468 | (a)[1] == (b)[1] && \ |
1469 | (a)[0] == (b)[0]) |
1470 | |
1471 | static void fixup_botched_add(struct ctlr_info *h, |
1472 | struct hpsa_scsi_dev_t *added) |
1473 | { |
1474 | /* called when scsi_add_device fails in order to re-adjust |
1475 | * h->dev[] to match the mid layer's view. |
1476 | */ |
1477 | unsigned long flags; |
1478 | int i, j; |
1479 | |
1480 | spin_lock_irqsave(&h->lock, flags); |
1481 | for (i = 0; i < h->ndevices; i++) { |
1482 | if (h->dev[i] == added) { |
1483 | for (j = i; j < h->ndevices-1; j++) |
1484 | h->dev[j] = h->dev[j+1]; |
1485 | h->ndevices--; |
1486 | break; |
1487 | } |
1488 | } |
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
1491 | } |
1492 | |
1493 | static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, |
1494 | struct hpsa_scsi_dev_t *dev2) |
1495 | { |
1496 | /* we compare everything except lun and target as these |
1497 | * are not yet assigned. Compare parts likely |
1498 | * to differ first |
1499 | */ |
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
1510 | if (dev1->devtype != dev2->devtype) |
1511 | return 0; |
1512 | if (dev1->bus != dev2->bus) |
1513 | return 0; |
1514 | return 1; |
1515 | } |
1516 | |
1517 | static inline int device_updated(struct hpsa_scsi_dev_t *dev1, |
1518 | struct hpsa_scsi_dev_t *dev2) |
1519 | { |
1520 | /* Device attributes that can change, but don't mean |
1521 | * that the device is a different device, nor that the OS |
1522 | * needs to be told anything about the change. |
1523 | */ |
1524 | if (dev1->raid_level != dev2->raid_level) |
1525 | return 1; |
1526 | if (dev1->offload_config != dev2->offload_config) |
1527 | return 1; |
1528 | if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled) |
1529 | return 1; |
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1531 | if (dev1->queue_depth != dev2->queue_depth) |
1532 | return 1; |
1533 | /* |
1534 | * This can happen for dual domain devices. An active |
1535 | * path change causes the ioaccel handle to change |
1536 | * |
1537 | * for example note the handle differences between p0 and p1 |
1538 | * Device WWN ,WWN hash,Handle |
1539 | * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003 |
1540 | * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004 |
1541 | */ |
1542 | if (dev1->ioaccel_handle != dev2->ioaccel_handle) |
1543 | return 1; |
1544 | return 0; |
1545 | } |
1546 | |
1547 | /* Find needle in haystack. If exact match found, return DEVICE_SAME, |
1548 | * and return needle location in *index. If scsi3addr matches, but not |
1549 | * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle |
1550 | * location in *index. |
1551 | * In the case of a minor device attribute change, such as RAID level, just |
1552 | * return DEVICE_UPDATED, along with the updated device's location in index. |
1553 | * If needle not found, return DEVICE_NOT_FOUND. |
1554 | */ |
1555 | static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, |
1556 | struct hpsa_scsi_dev_t *haystack[], int haystack_size, |
1557 | int *index) |
1558 | { |
1559 | int i; |
1560 | #define DEVICE_NOT_FOUND 0 |
1561 | #define DEVICE_CHANGED 1 |
1562 | #define DEVICE_SAME 2 |
1563 | #define DEVICE_UPDATED 3 |
1564 | if (needle == NULL) |
1565 | return DEVICE_NOT_FOUND; |
1566 | |
1567 | for (i = 0; i < haystack_size; i++) { |
1568 | if (haystack[i] == NULL) /* previously removed. */ |
1569 | continue; |
1570 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
1571 | *index = i; |
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
1574 | return DEVICE_UPDATED; |
1575 | return DEVICE_SAME; |
1576 | } else { |
1577 | /* Keep offline devices offline */ |
1578 | if (needle->volume_offline) |
1579 | return DEVICE_NOT_FOUND; |
1580 | return DEVICE_CHANGED; |
1581 | } |
1582 | } |
1583 | } |
1584 | *index = -1; |
1585 | return DEVICE_NOT_FOUND; |
1586 | } |
1587 | |
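/*
 * Track a volume that reported itself offline: add its address to
 * h->offline_device_list (if not already present) so it can be polled
 * later and brought online when it becomes ready.
 */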
1588 | static void hpsa_monitor_offline_device(struct ctlr_info *h, |
1589 | unsigned char scsi3addr[]) |
1590 | { |
1591 | struct offline_device_entry *device; |
1592 | unsigned long flags; |
1593 | |
1594 | /* Check to see if device is already on the list */ |
1595 | spin_lock_irqsave(&h->offline_device_lock, flags); |
1596 | list_for_each_entry(device, &h->offline_device_list, offline_list) { |
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
1600 | return; |
1601 | } |
1602 | } |
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1604 | |
1605 | /* Device is not on the list, add it. */ |
	device = kmalloc(sizeof(*device), GFP_KERNEL);
1607 | if (!device) |
1608 | return; |
1609 | |
1610 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); |
1611 | spin_lock_irqsave(&h->offline_device_lock, flags); |
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1614 | } |
1615 | |
1616 | /* Print a message explaining various offline volume states */ |
1617 | static void hpsa_show_volume_status(struct ctlr_info *h, |
1618 | struct hpsa_scsi_dev_t *sd) |
1619 | { |
1620 | if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) |
1621 | dev_info(&h->pdev->dev, |
1622 | "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n" , |
1623 | h->scsi_host->host_no, |
1624 | sd->bus, sd->target, sd->lun); |
1625 | switch (sd->volume_offline) { |
1626 | case HPSA_LV_OK: |
1627 | break; |
1628 | case HPSA_LV_UNDERGOING_ERASE: |
1629 | dev_info(&h->pdev->dev, |
1630 | "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n" , |
1631 | h->scsi_host->host_no, |
1632 | sd->bus, sd->target, sd->lun); |
1633 | break; |
1634 | case HPSA_LV_NOT_AVAILABLE: |
1635 | dev_info(&h->pdev->dev, |
1636 | "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n" , |
1637 | h->scsi_host->host_no, |
1638 | sd->bus, sd->target, sd->lun); |
1639 | break; |
1640 | case HPSA_LV_UNDERGOING_RPI: |
1641 | dev_info(&h->pdev->dev, |
1642 | "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n" , |
1643 | h->scsi_host->host_no, |
1644 | sd->bus, sd->target, sd->lun); |
1645 | break; |
1646 | case HPSA_LV_PENDING_RPI: |
1647 | dev_info(&h->pdev->dev, |
1648 | "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n" , |
1649 | h->scsi_host->host_no, |
1650 | sd->bus, sd->target, sd->lun); |
1651 | break; |
1652 | case HPSA_LV_ENCRYPTED_NO_KEY: |
1653 | dev_info(&h->pdev->dev, |
1654 | "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n" , |
1655 | h->scsi_host->host_no, |
1656 | sd->bus, sd->target, sd->lun); |
1657 | break; |
1658 | case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: |
1659 | dev_info(&h->pdev->dev, |
1660 | "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n" , |
1661 | h->scsi_host->host_no, |
1662 | sd->bus, sd->target, sd->lun); |
1663 | break; |
1664 | case HPSA_LV_UNDERGOING_ENCRYPTION: |
1665 | dev_info(&h->pdev->dev, |
1666 | "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n" , |
1667 | h->scsi_host->host_no, |
1668 | sd->bus, sd->target, sd->lun); |
1669 | break; |
1670 | case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: |
1671 | dev_info(&h->pdev->dev, |
1672 | "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n" , |
1673 | h->scsi_host->host_no, |
1674 | sd->bus, sd->target, sd->lun); |
1675 | break; |
1676 | case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: |
1677 | dev_info(&h->pdev->dev, |
1678 | "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n" , |
1679 | h->scsi_host->host_no, |
1680 | sd->bus, sd->target, sd->lun); |
1681 | break; |
1682 | case HPSA_LV_PENDING_ENCRYPTION: |
1683 | dev_info(&h->pdev->dev, |
1684 | "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n" , |
1685 | h->scsi_host->host_no, |
1686 | sd->bus, sd->target, sd->lun); |
1687 | break; |
1688 | case HPSA_LV_PENDING_ENCRYPTION_REKEYING: |
1689 | dev_info(&h->pdev->dev, |
1690 | "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n" , |
1691 | h->scsi_host->host_no, |
1692 | sd->bus, sd->target, sd->lun); |
1693 | break; |
1694 | } |
1695 | } |
1696 | |
1697 | /* |
1698 | * Figure the list of physical drive pointers for a logical drive with |
1699 | * raid offload configured. |
1700 | */ |
1701 | static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, |
1702 | struct hpsa_scsi_dev_t *dev[], int ndevices, |
1703 | struct hpsa_scsi_dev_t *logical_drive) |
1704 | { |
1705 | struct raid_map_data *map = &logical_drive->raid_map; |
1706 | struct raid_map_disk_data *dd = &map->data[0]; |
1707 | int i, j; |
1708 | int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + |
1709 | le16_to_cpu(map->metadata_disks_per_row); |
1710 | int nraid_map_entries = le16_to_cpu(map->row_cnt) * |
1711 | le16_to_cpu(map->layout_map_count) * |
1712 | total_disks_per_row; |
1713 | int nphys_disk = le16_to_cpu(map->layout_map_count) * |
1714 | total_disks_per_row; |
1715 | int qdepth; |
1716 | |
1717 | if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) |
1718 | nraid_map_entries = RAID_MAP_MAX_ENTRIES; |
1719 | |
1720 | logical_drive->nphysical_disks = nraid_map_entries; |
1721 | |
1722 | qdepth = 0; |
1723 | for (i = 0; i < nraid_map_entries; i++) { |
1724 | logical_drive->phys_disk[i] = NULL; |
1725 | if (!logical_drive->offload_config) |
1726 | continue; |
1727 | for (j = 0; j < ndevices; j++) { |
1728 | if (dev[j] == NULL) |
1729 | continue; |
1730 | if (dev[j]->devtype != TYPE_DISK && |
1731 | dev[j]->devtype != TYPE_ZBC) |
1732 | continue; |
			if (is_logical_device(dev[j]))
1734 | continue; |
1735 | if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) |
1736 | continue; |
1737 | |
1738 | logical_drive->phys_disk[i] = dev[j]; |
1739 | if (i < nphys_disk) |
1740 | qdepth = min(h->nr_cmds, qdepth + |
1741 | logical_drive->phys_disk[i]->queue_depth); |
1742 | break; |
1743 | } |
1744 | |
1745 | /* |
1746 | * This can happen if a physical drive is removed and |
1747 | * the logical drive is degraded. In that case, the RAID |
1748 | * map data will refer to a physical disk which isn't actually |
1749 | * present. And in that case offload_enabled should already |
1750 | * be 0, but we'll turn it off here just in case |
1751 | */ |
1752 | if (!logical_drive->phys_disk[i]) { |
1753 | dev_warn(&h->pdev->dev, |
1754 | "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n" , |
1755 | __func__, |
1756 | h->scsi_host->host_no, logical_drive->bus, |
1757 | logical_drive->target, logical_drive->lun); |
1758 | hpsa_turn_off_ioaccel_for_device(device: logical_drive); |
1759 | logical_drive->queue_depth = 8; |
1760 | } |
1761 | } |
1762 | if (nraid_map_entries) |
1763 | /* |
1764 | * This is correct for reads, too high for full stripe writes, |
1765 | * way too high for partial stripe writes |
1766 | */ |
1767 | logical_drive->queue_depth = qdepth; |
1768 | else { |
1769 | if (logical_drive->external) |
1770 | logical_drive->queue_depth = EXTERNAL_QD; |
1771 | else |
1772 | logical_drive->queue_depth = h->nr_cmds; |
1773 | } |
1774 | } |
1775 | |
1776 | static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, |
1777 | struct hpsa_scsi_dev_t *dev[], int ndevices) |
1778 | { |
1779 | int i; |
1780 | |
1781 | for (i = 0; i < ndevices; i++) { |
1782 | if (dev[i] == NULL) |
1783 | continue; |
1784 | if (dev[i]->devtype != TYPE_DISK && |
1785 | dev[i]->devtype != TYPE_ZBC) |
1786 | continue; |
		if (!is_logical_device(dev[i]))
1788 | continue; |
1789 | |
1790 | /* |
1791 | * If offload is currently enabled, the RAID map and |
1792 | * phys_disk[] assignment *better* not be changing |
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on an ioaccel volume processing I/O requests.
1795 | * |
1796 | * If an ioaccel volume status changed, initially because it was |
1797 | * re-configured and thus underwent a transformation, or |
1798 | * a drive failed, we would have received a state change |
1799 | * request and ioaccel should have been turned off. When the |
1800 | * transformation completes, we get another state change |
1801 | * request to turn ioaccel back on. In this case, we need |
1802 | * to update the ioaccel information. |
1803 | * |
1804 | * Thus: If it is not currently enabled, but will be after |
1805 | * the scan completes, make sure the ioaccel pointers |
1806 | * are up to date. |
1807 | */ |
1808 | |
1809 | if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled) |
			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1811 | } |
1812 | } |
1813 | |
1814 | static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) |
1815 | { |
1816 | int rc = 0; |
1817 | |
1818 | if (!h->scsi_host) |
1819 | return 1; |
1820 | |
1821 | if (is_logical_device(device)) /* RAID */ |
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);
1826 | |
1827 | return rc; |
1828 | } |
1829 | |
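/*
 * Count the commands in h->cmd_pool that are still outstanding for the
 * given device. A reference is taken on each command so it cannot be
 * freed while we check whether it is idle.
 */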
1830 | static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, |
1831 | struct hpsa_scsi_dev_t *dev) |
1832 | { |
1833 | int i; |
1834 | int count = 0; |
1835 | |
1836 | for (i = 0; i < h->nr_cmds; i++) { |
1837 | struct CommandList *c = h->cmd_pool + i; |
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
1848 | } |
1849 | |
1850 | cmd_free(h, c); |
1851 | } |
1852 | |
1853 | return count; |
1854 | } |
1855 | |
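/*
 * Poll once per second until the device has no outstanding commands,
 * giving up after NUM_WAIT polls (HPSA_EH_PTRAID_TIMEOUT polls for
 * external devices) and warning if commands are still outstanding.
 */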
1856 | #define NUM_WAIT 20 |
1857 | static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, |
1858 | struct hpsa_scsi_dev_t *device) |
1859 | { |
1860 | int cmds = 0; |
1861 | int waits = 0; |
1862 | int num_wait = NUM_WAIT; |
1863 | |
1864 | if (device->external) |
1865 | num_wait = HPSA_EH_PTRAID_TIMEOUT; |
1866 | |
1867 | while (1) { |
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1869 | if (cmds == 0) |
1870 | break; |
1871 | if (++waits > num_wait) |
1872 | break; |
		msleep(1000);
1874 | } |
1875 | |
1876 | if (waits > num_wait) { |
1877 | dev_warn(&h->pdev->dev, |
1878 | "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n" , |
1879 | __func__, |
1880 | h->scsi_host->host_no, |
1881 | device->bus, device->target, device->lun, cmds); |
1882 | } |
1883 | } |
1884 | |
1885 | static void hpsa_remove_device(struct ctlr_info *h, |
1886 | struct hpsa_scsi_dev_t *device) |
1887 | { |
1888 | struct scsi_device *sdev = NULL; |
1889 | |
1890 | if (!h->scsi_host) |
1891 | return; |
1892 | |
1893 | /* |
1894 | * Allow for commands to drain |
1895 | */ |
1896 | device->removed = 1; |
1897 | hpsa_wait_for_outstanding_commands_for_dev(h, device); |
1898 | |
1899 | if (is_logical_device(device)) { /* RAID */ |
1900 | sdev = scsi_device_lookup(h->scsi_host, device->bus, |
1901 | device->target, device->lun); |
1902 | if (sdev) { |
1903 | scsi_remove_device(sdev); |
1904 | scsi_device_put(sdev); |
1905 | } else { |
1906 | /* |
1907 | * We don't expect to get here. Future commands |
1908 | * to this device will get a selection timeout as |
1909 | * if the device were gone. |
1910 | */ |
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
1913 | } |
1914 | } else { /* HBA */ |
1915 | |
1916 | hpsa_remove_sas_device(device); |
1917 | } |
1918 | } |
1919 | |
1920 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, |
1921 | struct hpsa_scsi_dev_t *sd[], int nsds) |
1922 | { |
1923 | /* sd contains scsi3 addresses and devtypes, and inquiry |
1924 | * data. This function takes what's in sd to be the current |
1925 | * reality and updates h->dev[] to reflect that reality. |
1926 | */ |
1927 | int i, entry, device_change, changes = 0; |
1928 | struct hpsa_scsi_dev_t *csd; |
1929 | unsigned long flags; |
1930 | struct hpsa_scsi_dev_t **added, **removed; |
1931 | int nadded, nremoved; |
1932 | |
1933 | /* |
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
1936 | */ |
1937 | spin_lock_irqsave(&h->reset_lock, flags); |
1938 | if (h->reset_in_progress) { |
1939 | h->drv_req_rescan = 1; |
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);
1944 | |
	added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
	removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1947 | |
1948 | if (!added || !removed) { |
1949 | dev_warn(&h->pdev->dev, "out of memory in " |
1950 | "adjust_hpsa_scsi_table\n" ); |
1951 | goto free_and_out; |
1952 | } |
1953 | |
1954 | spin_lock_irqsave(&h->devlock, flags); |
1955 | |
1956 | /* find any devices in h->dev[] that are not in |
1957 | * sd[] and remove them from h->dev[], and for any |
1958 | * devices which have changed, remove the old device |
1959 | * info and add the new device info. |
1960 | * If minor device attributes change, just update |
1961 | * the existing device structure. |
1962 | */ |
1963 | i = 0; |
1964 | nremoved = 0; |
1965 | nadded = 0; |
1966 | while (i < h->ndevices) { |
1967 | csd = h->dev[i]; |
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
1984 | i++; |
1985 | } |
1986 | |
1987 | /* Now, make sure every device listed in sd[] is also |
1988 | * listed in h->dev[], adding them if they aren't found |
1989 | */ |
1990 | |
1991 | for (i = 0; i < nsds; i++) { |
1992 | if (!sd[i]) /* if already added above. */ |
1993 | continue; |
1994 | |
1995 | /* Don't add devices which are NOT READY, FORMAT IN PROGRESS |
1996 | * as the SCSI mid-layer does not handle such devices well. |
1997 | * It relentlessly loops sending TUR at 3Hz, then READ(10) |
1998 | * at 160Hz, and prevents the system from coming up. |
1999 | */ |
2000 | if (sd[i]->volume_offline) { |
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
2003 | continue; |
2004 | } |
2005 | |
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
2011 | break; |
2012 | sd[i] = NULL; /* prevent from being freed later. */ |
2013 | } else if (device_change == DEVICE_CHANGED) { |
2014 | /* should never happen... */ |
2015 | changes++; |
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
2018 | /* but if it does happen, we just ignore that device */ |
2019 | } |
2020 | } |
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2022 | |
2023 | /* |
2024 | * Now that h->dev[]->phys_disk[] is coherent, we can enable |
2025 | * any logical drives that need it enabled. |
2026 | * |
2027 | * The raid map should be current by now. |
2028 | * |
2029 | * We are updating the device list used for I/O requests. |
2030 | */ |
2031 | for (i = 0; i < h->ndevices; i++) { |
2032 | if (h->dev[i] == NULL) |
2033 | continue; |
2034 | h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; |
2035 | } |
2036 | |
	spin_unlock_irqrestore(&h->devlock, flags);
2038 | |
2039 | /* Monitor devices which are in one of several NOT READY states to be |
2040 | * brought online later. This must be done without holding h->devlock, |
2041 | * so don't touch h->dev[] |
2042 | */ |
2043 | for (i = 0; i < nsds; i++) { |
2044 | if (!sd[i]) /* if already added above. */ |
2045 | continue; |
2046 | if (sd[i]->volume_offline) |
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2048 | } |
2049 | |
	/* Don't notify scsi mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host will do it
	 * later, the first time through.
	 */
2054 | if (!changes) |
2055 | goto free_and_out; |
2056 | |
2057 | /* Notify scsi mid layer of any removed devices */ |
2058 | for (i = 0; i < nremoved; i++) { |
2059 | if (removed[i] == NULL) |
2060 | continue; |
2061 | if (removed[i]->expose_device) |
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
2064 | removed[i] = NULL; |
2065 | } |
2066 | |
2067 | /* Notify scsi mid layer of any added devices */ |
2068 | for (i = 0; i < nadded; i++) { |
2069 | int rc = 0; |
2070 | |
2071 | if (added[i] == NULL) |
2072 | continue; |
2073 | if (!(added[i]->expose_device)) |
2074 | continue; |
		rc = hpsa_add_device(h, added[i]);
2076 | if (!rc) |
2077 | continue; |
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
2080 | /* now we have to remove it from h->dev, |
2081 | * since it didn't get added to scsi mid layer |
2082 | */ |
		fixup_botched_add(h, added[i]);
2084 | h->drv_req_rescan = 1; |
2085 | } |
2086 | |
2087 | free_and_out: |
	kfree(added);
	kfree(removed);
2090 | } |
2091 | |
2092 | /* |
2093 | * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * |
 * Assumes h->devlock is held.
2095 | */ |
2096 | static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, |
2097 | int bus, int target, int lun) |
2098 | { |
2099 | int i; |
2100 | struct hpsa_scsi_dev_t *sd; |
2101 | |
2102 | for (i = 0; i < h->ndevices; i++) { |
2103 | sd = h->dev[i]; |
2104 | if (sd->bus == bus && sd->target == target && sd->lun == lun) |
2105 | return sd; |
2106 | } |
2107 | return NULL; |
2108 | } |
2109 | |
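/*
 * SCSI midlayer slave_alloc hook: bind the new scsi_device to our
 * matching hpsa_scsi_dev_t (by SAS rphy for physical devices, else by
 * bus/target/lun) via sdev->hostdata.
 */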
2110 | static int hpsa_slave_alloc(struct scsi_device *sdev) |
2111 | { |
2112 | struct hpsa_scsi_dev_t *sd = NULL; |
2113 | unsigned long flags; |
2114 | struct ctlr_info *h; |
2115 | |
2116 | h = sdev_to_hba(sdev); |
2117 | spin_lock_irqsave(&h->devlock, flags); |
2118 | if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { |
2119 | struct scsi_target *starget; |
2120 | struct sas_rphy *rphy; |
2121 | |
2122 | starget = scsi_target(sdev); |
2123 | rphy = target_to_rphy(starget); |
2124 | sd = hpsa_find_device_by_sas_rphy(h, rphy); |
2125 | if (sd) { |
2126 | sd->target = sdev_id(sdev); |
2127 | sd->lun = sdev->lun; |
2128 | } |
2129 | } |
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
2140 | return 0; |
2141 | } |
2142 | |
2143 | /* configure scsi device based on internal per-device structure */ |
2144 | #define CTLR_TIMEOUT (120 * HZ) |
2145 | static int hpsa_slave_configure(struct scsi_device *sdev) |
2146 | { |
2147 | struct hpsa_scsi_dev_t *sd; |
2148 | int queue_depth; |
2149 | |
2150 | sd = sdev->hostdata; |
2151 | sdev->no_uld_attach = !sd || !sd->expose_device; |
2152 | |
2153 | if (sd) { |
2154 | sd->was_removed = 0; |
2155 | queue_depth = sd->queue_depth != 0 ? |
2156 | sd->queue_depth : sdev->host->can_queue; |
2157 | if (sd->external) { |
2158 | queue_depth = EXTERNAL_QD; |
2159 | sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; |
2160 | blk_queue_rq_timeout(sdev->request_queue, |
2161 | HPSA_EH_PTRAID_TIMEOUT); |
2162 | } |
		if (is_hba_lunid(sd->scsi3addr)) {
2164 | sdev->eh_timeout = CTLR_TIMEOUT; |
2165 | blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); |
2166 | } |
2167 | } else { |
2168 | queue_depth = sdev->host->can_queue; |
2169 | } |
2170 | |
2171 | scsi_change_queue_depth(sdev, queue_depth); |
2172 | |
2173 | return 0; |
2174 | } |
2175 | |
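/* SCSI midlayer slave_destroy hook: note that the device was removed. */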
2176 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
2177 | { |
2178 | struct hpsa_scsi_dev_t *hdev = NULL; |
2179 | |
2180 | hdev = sdev->hostdata; |
2181 | |
2182 | if (hdev) |
2183 | hdev->was_removed = 1; |
2184 | } |
2185 | |
2186 | static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
2187 | { |
2188 | int i; |
2189 | |
2190 | if (!h->ioaccel2_cmd_sg_list) |
2191 | return; |
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
2198 | } |
2199 | |
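/*
 * Preallocate one SG chain block (h->maxsgentries elements) per command
 * for the ioaccel2 path so that a chain is available at I/O time.
 */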
2200 | static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
2201 | { |
2202 | int i; |
2203 | |
2204 | if (h->chainsize <= 0) |
2205 | return 0; |
2206 | |
2207 | h->ioaccel2_cmd_sg_list = |
		kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
			GFP_KERNEL);
2210 | if (!h->ioaccel2_cmd_sg_list) |
2211 | return -ENOMEM; |
2212 | for (i = 0; i < h->nr_cmds; i++) { |
2213 | h->ioaccel2_cmd_sg_list[i] = |
			kmalloc_array(h->maxsgentries,
				sizeof(*h->ioaccel2_cmd_sg_list[i]),
				GFP_KERNEL);
2217 | if (!h->ioaccel2_cmd_sg_list[i]) |
2218 | goto clean; |
2219 | } |
2220 | return 0; |
2221 | |
2222 | clean: |
2223 | hpsa_free_ioaccel2_sg_chain_blocks(h); |
2224 | return -ENOMEM; |
2225 | } |
2226 | |
2227 | static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) |
2228 | { |
2229 | int i; |
2230 | |
2231 | if (!h->cmd_sg_list) |
2232 | return; |
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
2239 | } |
2240 | |
2241 | static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) |
2242 | { |
2243 | int i; |
2244 | |
2245 | if (h->chainsize <= 0) |
2246 | return 0; |
2247 | |
	h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2249 | GFP_KERNEL); |
2250 | if (!h->cmd_sg_list) |
2251 | return -ENOMEM; |
2252 | |
2253 | for (i = 0; i < h->nr_cmds; i++) { |
		h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
						sizeof(*h->cmd_sg_list[i]),
						GFP_KERNEL);
2257 | if (!h->cmd_sg_list[i]) |
2258 | goto clean; |
2259 | |
2260 | } |
2261 | return 0; |
2262 | |
2263 | clean: |
2264 | hpsa_free_sg_chain_blocks(h); |
2265 | return -ENOMEM; |
2266 | } |
2267 | |
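/*
 * DMA-map the ioaccel2 SG chain block for a command and store the bus
 * address in the chain descriptor. On mapping failure the address is
 * zeroed so that it is never unmapped, and -1 is returned.
 */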
2268 | static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, |
2269 | struct io_accel2_cmd *cp, struct CommandList *c) |
2270 | { |
2271 | struct ioaccel2_sg_element *chain_block; |
2272 | u64 temp64; |
2273 | u32 chain_size; |
2274 | |
2275 | chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; |
2276 | chain_size = le32_to_cpu(cp->sg[0].length); |
2277 | temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, |
2278 | DMA_TO_DEVICE); |
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2280 | /* prevent subsequent unmapping */ |
2281 | cp->sg->address = 0; |
2282 | return -1; |
2283 | } |
2284 | cp->sg->address = cpu_to_le64(temp64); |
2285 | return 0; |
2286 | } |
2287 | |
2288 | static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, |
2289 | struct io_accel2_cmd *cp) |
2290 | { |
2291 | struct ioaccel2_sg_element *chain_sg; |
2292 | u64 temp64; |
2293 | u32 chain_size; |
2294 | |
2295 | chain_sg = cp->sg; |
2296 | temp64 = le64_to_cpu(chain_sg->address); |
2297 | chain_size = le32_to_cpu(cp->sg[0].length); |
2298 | dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); |
2299 | } |
2300 | |
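/*
 * Map the spill-over SG chain block of a CISS command: the last inline
 * SG descriptor is turned into a chain pointer (HPSA_SG_CHAIN) covering
 * the entries beyond h->max_cmd_sg_entries.
 */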
2301 | static int hpsa_map_sg_chain_block(struct ctlr_info *h, |
2302 | struct CommandList *c) |
2303 | { |
2304 | struct SGDescriptor *chain_sg, *chain_block; |
2305 | u64 temp64; |
2306 | u32 chain_len; |
2307 | |
2308 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
2309 | chain_block = h->cmd_sg_list[c->cmdindex]; |
2310 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); |
2311 | chain_len = sizeof(*chain_sg) * |
2312 | (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
2313 | chain_sg->Len = cpu_to_le32(chain_len); |
2314 | temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, |
2315 | DMA_TO_DEVICE); |
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2317 | /* prevent subsequent unmapping */ |
2318 | chain_sg->Addr = cpu_to_le64(0); |
2319 | return -1; |
2320 | } |
2321 | chain_sg->Addr = cpu_to_le64(temp64); |
2322 | return 0; |
2323 | } |
2324 | |
2325 | static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, |
2326 | struct CommandList *c) |
2327 | { |
2328 | struct SGDescriptor *chain_sg; |
2329 | |
2330 | if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) |
2331 | return; |
2332 | |
2333 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
2334 | dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), |
2335 | le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); |
2336 | } |
2337 | |
2338 | |
2339 | /* Decode the various types of errors on ioaccel2 path. |
2340 | * Return 1 for any error that should generate a RAID path retry. |
2341 | * Return 0 for errors that don't require a RAID path retry. |
2342 | */ |
2343 | static int handle_ioaccel_mode2_error(struct ctlr_info *h, |
2344 | struct CommandList *c, |
2345 | struct scsi_cmnd *cmd, |
2346 | struct io_accel2_cmd *c2, |
2347 | struct hpsa_scsi_dev_t *dev) |
2348 | { |
2349 | int data_len; |
2350 | int retry = 0; |
2351 | u32 ioaccel2_resid = 0; |
2352 | |
2353 | switch (c2->error_data.serv_response) { |
2354 | case IOACCEL2_SERV_RESPONSE_COMPLETE: |
2355 | switch (c2->error_data.status) { |
2356 | case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: |
2357 | if (cmd) |
2358 | cmd->result = 0; |
2359 | break; |
2360 | case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: |
2361 | cmd->result |= SAM_STAT_CHECK_CONDITION; |
2362 | if (c2->error_data.data_present != |
2363 | IOACCEL2_SENSE_DATA_PRESENT) { |
2364 | memset(cmd->sense_buffer, 0, |
2365 | SCSI_SENSE_BUFFERSIZE); |
2366 | break; |
2367 | } |
2368 | /* copy the sense data */ |
2369 | data_len = c2->error_data.sense_data_len; |
2370 | if (data_len > SCSI_SENSE_BUFFERSIZE) |
2371 | data_len = SCSI_SENSE_BUFFERSIZE; |
2372 | if (data_len > sizeof(c2->error_data.sense_data_buff)) |
2373 | data_len = |
2374 | sizeof(c2->error_data.sense_data_buff); |
2375 | memcpy(cmd->sense_buffer, |
2376 | c2->error_data.sense_data_buff, data_len); |
2377 | retry = 1; |
2378 | break; |
2379 | case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: |
2380 | retry = 1; |
2381 | break; |
2382 | case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: |
2383 | retry = 1; |
2384 | break; |
2385 | case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: |
2386 | retry = 1; |
2387 | break; |
2388 | case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: |
2389 | retry = 1; |
2390 | break; |
2391 | default: |
2392 | retry = 1; |
2393 | break; |
2394 | } |
2395 | break; |
2396 | case IOACCEL2_SERV_RESPONSE_FAILURE: |
2397 | switch (c2->error_data.status) { |
2398 | case IOACCEL2_STATUS_SR_IO_ERROR: |
2399 | case IOACCEL2_STATUS_SR_IO_ABORTED: |
2400 | case IOACCEL2_STATUS_SR_OVERRUN: |
2401 | retry = 1; |
2402 | break; |
2403 | case IOACCEL2_STATUS_SR_UNDERRUN: |
2404 | cmd->result = (DID_OK << 16); /* host byte */ |
			ioaccel2_resid = get_unaligned_le32(
					&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
2408 | break; |
2409 | case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: |
2410 | case IOACCEL2_STATUS_SR_INVALID_DEVICE: |
2411 | case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: |
2412 | /* |
2413 | * Did an HBA disk disappear? We will eventually |
2414 | * get a state change event from the controller but |
2415 | * in the meantime, we need to tell the OS that the |
2416 | * HBA disk is no longer there and stop I/O |
2417 | * from going down. This allows the potential re-insert |
2418 | * of the disk to get the same device node. |
2419 | */ |
2420 | if (dev->physical_device && dev->expose_device) { |
2421 | cmd->result = DID_NO_CONNECT << 16; |
2422 | dev->removed = 1; |
2423 | h->drv_req_rescan = 1; |
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
2426 | } else |
2427 | /* |
2428 | * Retry by sending down the RAID path. |
2429 | * We will get an event from ctlr to |
2430 | * trigger rescan regardless. |
2431 | */ |
2432 | retry = 1; |
2433 | break; |
2434 | default: |
2435 | retry = 1; |
2436 | } |
2437 | break; |
2438 | case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: |
2439 | break; |
2440 | case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: |
2441 | break; |
2442 | case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: |
2443 | retry = 1; |
2444 | break; |
2445 | case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: |
2446 | break; |
2447 | default: |
2448 | retry = 1; |
2449 | break; |
2450 | } |
2451 | |
2452 | if (dev->in_reset) |
2453 | retry = 0; |
2454 | |
2455 | return retry; /* retry on raid path? */ |
2456 | } |
2457 | |
2458 | static void hpsa_cmd_resolve_events(struct ctlr_info *h, |
2459 | struct CommandList *c) |
2460 | { |
2461 | struct hpsa_scsi_dev_t *dev = c->device; |
2462 | |
2463 | /* |
2464 | * Reset c->scsi_cmd here so that the reset handler will know |
2465 | * this command has completed. Then, check to see if the handler is |
2466 | * waiting for this command, and, if so, wake it. |
2467 | */ |
2468 | c->scsi_cmd = SCSI_CMD_IDLE; |
2469 | mb(); /* Declare command idle before checking for pending events. */ |
2470 | if (dev) { |
		atomic_dec(&dev->commands_outstanding);
		if (dev->in_reset &&
			atomic_read(&dev->commands_outstanding) <= 0)
2474 | wake_up_all(&h->event_sync_wait_queue); |
2475 | } |
2476 | } |
2477 | |
2478 | static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, |
2479 | struct CommandList *c) |
2480 | { |
2481 | hpsa_cmd_resolve_events(h, c); |
2482 | cmd_tagged_free(h, c); |
2483 | } |
2484 | |
2485 | static void hpsa_cmd_free_and_done(struct ctlr_info *h, |
2486 | struct CommandList *c, struct scsi_cmnd *cmd) |
2487 | { |
2488 | hpsa_cmd_resolve_and_free(h, c); |
2489 | if (cmd) |
2490 | scsi_done(cmd); |
2491 | } |
2492 | |
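/*
 * Resubmit a command down the RAID path by handing it to the resubmit
 * workqueue on the current CPU rather than retrying it inline.
 */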
2493 | static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) |
2494 | { |
2495 | INIT_WORK(&c->work, hpsa_command_resubmit_worker); |
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2497 | } |
2498 | |
2499 | static void process_ioaccel2_completion(struct ctlr_info *h, |
2500 | struct CommandList *c, struct scsi_cmnd *cmd, |
2501 | struct hpsa_scsi_dev_t *dev) |
2502 | { |
2503 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
2504 | |
2505 | /* check for good status */ |
2506 | if (likely(c2->error_data.serv_response == 0 && |
2507 | c2->error_data.status == 0)) { |
2508 | cmd->result = 0; |
2509 | return hpsa_cmd_free_and_done(h, c, cmd); |
2510 | } |
2511 | |
2512 | /* |
2513 | * Any RAID offload error results in retry which will use |
2514 | * the normal I/O path so the controller can handle whatever is |
2515 | * wrong. |
2516 | */ |
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
		IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			hpsa_turn_off_ioaccel_for_device(dev);
2523 | } |
2524 | |
2525 | if (dev->in_reset) { |
2526 | cmd->result = DID_RESET << 16; |
2527 | return hpsa_cmd_free_and_done(h, c, cmd); |
2528 | } |
2529 | |
2530 | return hpsa_retry_cmd(h, c); |
2531 | } |
2532 | |
2533 | if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) |
2534 | return hpsa_retry_cmd(h, c); |
2535 | |
2536 | return hpsa_cmd_free_and_done(h, c, cmd); |
2537 | } |
2538 | |
2539 | /* Returns 0 on success, < 0 otherwise. */ |
2540 | static int hpsa_evaluate_tmf_status(struct ctlr_info *h, |
2541 | struct CommandList *cp) |
2542 | { |
2543 | u8 tmf_status = cp->err_info->ScsiStatus; |
2544 | |
2545 | switch (tmf_status) { |
2546 | case CISS_TMF_COMPLETE: |
2547 | /* |
2548 | * CISS_TMF_COMPLETE never happens, instead, |
2549 | * ei->CommandStatus == 0 for this case. |
2550 | */ |
2551 | case CISS_TMF_SUCCESS: |
2552 | return 0; |
2553 | case CISS_TMF_INVALID_FRAME: |
2554 | case CISS_TMF_NOT_SUPPORTED: |
2555 | case CISS_TMF_FAILED: |
2556 | case CISS_TMF_WRONG_LUN: |
2557 | case CISS_TMF_OVERLAPPED_TAG: |
2558 | break; |
2559 | default: |
2560 | dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n" , |
2561 | tmf_status); |
2562 | break; |
2563 | } |
2564 | return -tmf_status; |
2565 | } |
2566 | |
2567 | static void complete_scsi_command(struct CommandList *cp) |
2568 | { |
2569 | struct scsi_cmnd *cmd; |
2570 | struct ctlr_info *h; |
2571 | struct ErrorInfo *ei; |
2572 | struct hpsa_scsi_dev_t *dev; |
2573 | struct io_accel2_cmd *c2; |
2574 | |
2575 | u8 sense_key; |
2576 | u8 asc; /* additional sense code */ |
2577 | u8 ascq; /* additional sense code qualifier */ |
2578 | unsigned long sense_data_size; |
2579 | |
2580 | ei = cp->err_info; |
2581 | cmd = cp->scsi_cmd; |
2582 | h = cp->h; |
2583 | |
2584 | if (!cmd->device) { |
2585 | cmd->result = DID_NO_CONNECT << 16; |
		return hpsa_cmd_free_and_done(h, cp, cmd);
2587 | } |
2588 | |
2589 | dev = cmd->device->hostdata; |
2590 | if (!dev) { |
2591 | cmd->result = DID_NO_CONNECT << 16; |
		return hpsa_cmd_free_and_done(h, cp, cmd);
2593 | } |
2594 | c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; |
2595 | |
2596 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ |
2597 | if ((cp->cmd_type == CMD_SCSI) && |
2598 | (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) |
		hpsa_unmap_sg_chain_block(h, cp);
2600 | |
2601 | if ((cp->cmd_type == CMD_IOACCEL2) && |
2602 | (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) |
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2604 | |
2605 | cmd->result = (DID_OK << 16); /* host byte */ |
2606 | |
2607 | /* SCSI command has already been cleaned up in SML */ |
2608 | if (dev->was_removed) { |
		hpsa_cmd_resolve_and_free(h, cp);
2610 | return; |
2611 | } |
2612 | |
2613 | if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { |
2614 | if (dev->physical_device && dev->expose_device && |
2615 | dev->removed) { |
2616 | cmd->result = DID_NO_CONNECT << 16; |
			return hpsa_cmd_free_and_done(h, cp, cmd);
2618 | } |
2619 | if (likely(cp->phys_disk != NULL)) |
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2621 | } |
2622 | |
2623 | /* |
2624 | * We check for lockup status here as it may be set for |
2625 | * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by |
	 * fail_all_outstanding_cmds()
2627 | */ |
2628 | if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { |
2629 | /* DID_NO_CONNECT will prevent a retry */ |
2630 | cmd->result = DID_NO_CONNECT << 16; |
		return hpsa_cmd_free_and_done(h, cp, cmd);
2632 | } |
2633 | |
2634 | if (cp->cmd_type == CMD_IOACCEL2) |
		return process_ioaccel2_completion(h, cp, cmd, dev);
2636 | |
	scsi_set_resid(cmd, ei->ResidualCnt);
2638 | if (ei->CommandStatus == 0) |
		return hpsa_cmd_free_and_done(h, cp, cmd);
2640 | |
2641 | /* For I/O accelerator commands, copy over some fields to the normal |
2642 | * CISS header used below for error handling. |
2643 | */ |
2644 | if (cp->cmd_type == CMD_IOACCEL1) { |
2645 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; |
2646 | cp->Header.SGList = scsi_sg_count(cmd); |
2647 | cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); |
2648 | cp->Request.CDBLen = le16_to_cpu(c->io_flags) & |
2649 | IOACCEL1_IOFLAGS_CDBLEN_MASK; |
2650 | cp->Header.tag = c->tag; |
2651 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); |
2652 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); |
2653 | |
2654 | /* Any RAID offload error results in retry which will use |
2655 | * the normal I/O path so the controller can handle whatever's |
2656 | * wrong. |
2657 | */ |
		if (is_logical_device(dev)) {
2659 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) |
2660 | dev->offload_enabled = 0; |
			return hpsa_retry_cmd(h, cp);
2662 | } |
2663 | } |
2664 | |
2665 | /* an error has occurred */ |
2666 | switch (ei->CommandStatus) { |
2667 | |
2668 | case CMD_TARGET_STATUS: |
2669 | cmd->result |= ei->ScsiStatus; |
2670 | /* copy the sense data */ |
2671 | if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) |
2672 | sense_data_size = SCSI_SENSE_BUFFERSIZE; |
2673 | else |
2674 | sense_data_size = sizeof(ei->SenseInfo); |
2675 | if (ei->SenseLen < sense_data_size) |
2676 | sense_data_size = ei->SenseLen; |
2677 | memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); |
2678 | if (ei->ScsiStatus) |
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
2681 | if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { |
2682 | switch (sense_key) { |
2683 | case ABORTED_COMMAND: |
2684 | cmd->result |= DID_SOFT_ERROR << 16; |
2685 | break; |
2686 | case UNIT_ATTENTION: |
2687 | if (asc == 0x3F && ascq == 0x0E) |
2688 | h->drv_req_rescan = 1; |
2689 | break; |
2690 | case ILLEGAL_REQUEST: |
2691 | if (asc == 0x25 && ascq == 0x00) { |
2692 | dev->removed = 1; |
2693 | cmd->result = DID_NO_CONNECT << 16; |
2694 | } |
2695 | break; |
2696 | } |
2697 | break; |
2698 | } |
2699 | /* Problem was not a check condition |
2700 | * Pass it up to the upper layers... |
2701 | */ |
2702 | if (ei->ScsiStatus) { |
2703 | dev_warn(&h->pdev->dev, "cp %p has status 0x%x " |
2704 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
2705 | "Returning result: 0x%x\n" , |
2706 | cp, ei->ScsiStatus, |
2707 | sense_key, asc, ascq, |
2708 | cmd->result); |
2709 | } else { /* scsi status is zero??? How??? */ |
2710 | dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " |
2711 | "Returning no connection.\n" , cp), |
2712 | |
2713 | /* Ordinarily, this case should never happen, |
2714 | * but there is a bug in some released firmware |
2715 | * revisions that allows it to happen if, for |
2716 | * example, a 4100 backplane loses power and |
2717 | * the tape drive is in it. We assume that |
2718 | * it's a fatal error of some kind because we |
2719 | * can't show that it wasn't. We will make it |
2720 | * look like selection timeout since that is |
2721 | * the most common reason for this to occur, |
2722 | * and it's severe enough. |
2723 | */ |
2724 | |
2725 | cmd->result = DID_NO_CONNECT << 16; |
2726 | } |
2727 | break; |
2728 | |
2729 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
2730 | break; |
2731 | case CMD_DATA_OVERRUN: |
2732 | dev_warn(&h->pdev->dev, |
2733 | "CDB %16phN data overrun\n" , cp->Request.CDB); |
2734 | break; |
2735 | case CMD_INVALID: { |
2736 | /* print_bytes(cp, sizeof(*cp), 1, 0); |
2737 | print_cmd(cp); */ |
2738 | /* We get CMD_INVALID if you address a non-existent device |
2739 | * instead of a selection timeout (no response). You will |
2740 | * see this if you yank out a drive, then try to access it. |
2741 | * This is kind of a shame because it means that any other |
2742 | * CMD_INVALID (e.g. driver bug) will get interpreted as a |
2743 | * missing target. */ |
2744 | cmd->result = DID_NO_CONNECT << 16; |
2745 | } |
2746 | break; |
2747 | case CMD_PROTOCOL_ERR: |
2748 | cmd->result = DID_ERROR << 16; |
2749 | dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n" , |
2750 | cp->Request.CDB); |
2751 | break; |
2752 | case CMD_HARDWARE_ERR: |
2753 | cmd->result = DID_ERROR << 16; |
2754 | dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n" , |
2755 | cp->Request.CDB); |
2756 | break; |
2757 | case CMD_CONNECTION_LOST: |
2758 | cmd->result = DID_ERROR << 16; |
2759 | dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n" , |
2760 | cp->Request.CDB); |
2761 | break; |
2762 | case CMD_ABORTED: |
2763 | cmd->result = DID_ABORT << 16; |
2764 | break; |
2765 | case CMD_ABORT_FAILED: |
2766 | cmd->result = DID_ERROR << 16; |
2767 | dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n" , |
2768 | cp->Request.CDB); |
2769 | break; |
2770 | case CMD_UNSOLICITED_ABORT: |
2771 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ |
2772 | dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n" , |
2773 | cp->Request.CDB); |
2774 | break; |
2775 | case CMD_TIMEOUT: |
2776 | cmd->result = DID_TIME_OUT << 16; |
2777 | dev_warn(&h->pdev->dev, "CDB %16phN timed out\n" , |
2778 | cp->Request.CDB); |
2779 | break; |
2780 | case CMD_UNABORTABLE: |
2781 | cmd->result = DID_ERROR << 16; |
2782 | dev_warn(&h->pdev->dev, "Command unabortable\n" ); |
2783 | break; |
2784 | case CMD_TMF_STATUS: |
2785 | if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ |
2786 | cmd->result = DID_ERROR << 16; |
2787 | break; |
2788 | case CMD_IOACCEL_DISABLED: |
2789 | /* This only handles the direct pass-through case since RAID |
2790 | * offload is handled above. Just attempt a retry. |
2791 | */ |
2792 | cmd->result = DID_SOFT_ERROR << 16; |
2793 | dev_warn(&h->pdev->dev, |
2794 | "cp %p had HP SSD Smart Path error\n" , cp); |
2795 | break; |
2796 | default: |
2797 | cmd->result = DID_ERROR << 16; |
2798 | dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n" , |
2799 | cp, ei->CommandStatus); |
2800 | } |
2801 | |
	return hpsa_cmd_free_and_done(h, cp, cmd);
2803 | } |
2804 | |
2805 | static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, |
2806 | int sg_used, enum dma_data_direction data_direction) |
2807 | { |
2808 | int i; |
2809 | |
2810 | for (i = 0; i < sg_used; i++) |
2811 | dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), |
2812 | le32_to_cpu(c->SG[i].Len), |
2813 | data_direction); |
2814 | } |
2815 | |
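/*
 * DMA-map a single contiguous buffer into the command's only SG
 * descriptor. On mapping failure the SG counts are zeroed so a later
 * unmap does nothing, and -1 is returned.
 */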
2816 | static int hpsa_map_one(struct pci_dev *pdev, |
2817 | struct CommandList *cp, |
2818 | unsigned char *buf, |
2819 | size_t buflen, |
2820 | enum dma_data_direction data_direction) |
2821 | { |
2822 | u64 addr64; |
2823 | |
2824 | if (buflen == 0 || data_direction == DMA_NONE) { |
2825 | cp->Header.SGList = 0; |
2826 | cp->Header.SGTotal = cpu_to_le16(0); |
2827 | return 0; |
2828 | } |
2829 | |
2830 | addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); |
	if (dma_mapping_error(&pdev->dev, addr64)) {
2832 | /* Prevent subsequent unmap of something never mapped */ |
2833 | cp->Header.SGList = 0; |
2834 | cp->Header.SGTotal = cpu_to_le16(0); |
2835 | return -1; |
2836 | } |
2837 | cp->SG[0].Addr = cpu_to_le64(addr64); |
2838 | cp->SG[0].Len = cpu_to_le32(buflen); |
2839 | cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ |
2840 | cp->Header.SGList = 1; /* no. SGs contig in this cmd */ |
2841 | cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ |
2842 | return 0; |
2843 | } |
2844 | |
2845 | #define NO_TIMEOUT ((unsigned long) -1) |
2846 | #define DEFAULT_TIMEOUT 30000 /* milliseconds */ |
2847 | static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
2848 | struct CommandList *c, int reply_queue, unsigned long timeout_msecs) |
2849 | { |
2850 | DECLARE_COMPLETION_ONSTACK(wait); |
2851 | |
2852 | c->waiting = &wait; |
2853 | __enqueue_cmd_and_start_io(h, c, reply_queue); |
2854 | if (timeout_msecs == NO_TIMEOUT) { |
2855 | /* TODO: get rid of this no-timeout thing */ |
2856 | wait_for_completion_io(&wait); |
2857 | return IO_OK; |
2858 | } |
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
2862 | return -ETIMEDOUT; |
2863 | } |
2864 | return IO_OK; |
2865 | } |
2866 | |
2867 | static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, |
2868 | int reply_queue, unsigned long timeout_msecs) |
2869 | { |
2870 | if (unlikely(lockup_detected(h))) { |
2871 | c->err_info->CommandStatus = CMD_CTLR_LOCKUP; |
2872 | return IO_OK; |
2873 | } |
2874 | return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); |
2875 | } |
2876 | |
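/* Read this CPU's copy of the per-cpu lockup-detected flag. */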
2877 | static u32 lockup_detected(struct ctlr_info *h) |
2878 | { |
2879 | int cpu; |
2880 | u32 rc, *lockup_detected; |
2881 | |
2882 | cpu = get_cpu(); |
2883 | lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); |
2884 | rc = *lockup_detected; |
2885 | put_cpu(); |
2886 | return rc; |
2887 | } |
2888 | |
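/*
 * Send a driver-internal command, retrying on unit attention or busy
 * status up to MAX_DRIVER_CMD_RETRIES times with a backoff that doubles
 * from 10 ms and is capped at 1 second; the command's single mapped
 * buffer is unmapped before returning.
 */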
2889 | #define MAX_DRIVER_CMD_RETRIES 25 |
2890 | static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
2891 | struct CommandList *c, enum dma_data_direction data_direction, |
2892 | unsigned long timeout_msecs) |
2893 | { |
2894 | int backoff_time = 10, retry_count = 0; |
2895 | int rc; |
2896 | |
2897 | do { |
2898 | memset(c->err_info, 0, sizeof(*c->err_info)); |
2899 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
2900 | timeout_msecs); |
2901 | if (rc) |
2902 | break; |
2903 | retry_count++; |
2904 | if (retry_count > 3) { |
			msleep(backoff_time);
2906 | if (backoff_time < 1000) |
2907 | backoff_time *= 2; |
2908 | } |
2909 | } while ((check_for_unit_attention(h, c) || |
2910 | check_for_busy(h, c)) && |
2911 | retry_count <= MAX_DRIVER_CMD_RETRIES); |
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2913 | if (retry_count > MAX_DRIVER_CMD_RETRIES) |
2914 | rc = -EIO; |
2915 | return rc; |
2916 | } |
2917 | |
2918 | static void hpsa_print_cmd(struct ctlr_info *h, char *txt, |
2919 | struct CommandList *c) |
2920 | { |
2921 | const u8 *cdb = c->Request.CDB; |
2922 | const u8 *lun = c->Header.LUN.LunAddrBytes; |
2923 | |
2924 | dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n" , |
2925 | txt, lun, cdb); |
2926 | } |
2927 | |
2928 | static void hpsa_scsi_interpret_error(struct ctlr_info *h, |
2929 | struct CommandList *cp) |
2930 | { |
2931 | const struct ErrorInfo *ei = cp->err_info; |
2932 | struct device *d = &cp->h->pdev->dev; |
2933 | u8 sense_key, asc, ascq; |
2934 | int sense_len; |
2935 | |
2936 | switch (ei->CommandStatus) { |
2937 | case CMD_TARGET_STATUS: |
2938 | if (ei->SenseLen > sizeof(ei->SenseInfo)) |
2939 | sense_len = sizeof(ei->SenseInfo); |
2940 | else |
2941 | sense_len = ei->SenseLen; |
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2950 | if (ei->ScsiStatus == 0) |
2951 | dev_warn(d, "SCSI status is abnormally zero. " |
2952 | "(probably indicates selection timeout " |
2953 | "reported incorrectly due to a known " |
2954 | "firmware bug, circa July, 2001.)\n" ); |
2955 | break; |
2956 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
2957 | break; |
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
			ei->CommandStatus);
3000 | } |
3001 | } |
3002 | |
3003 | static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr, |
3004 | u8 page, u8 *buf, size_t bufsize) |
3005 | { |
3006 | int rc = IO_OK; |
3007 | struct CommandList *c; |
3008 | struct ErrorInfo *ei; |
3009 | |
3010 | c = cmd_alloc(h); |
	if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
3023 | rc = -1; |
3024 | } |
3025 | out: |
3026 | cmd_free(h, c); |
3027 | return rc; |
3028 | } |
3029 | |
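/*
 * Return the enclosure's logical identifier: the big-endian 64-bit SAS
 * address at offset 12 of the RECEIVE DIAGNOSTIC response, or 0 on any
 * failure.
 */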
3030 | static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h, |
3031 | u8 *scsi3addr) |
3032 | { |
3033 | u8 *buf; |
3034 | u64 sa = 0; |
3035 | int rc = 0; |
3036 | |
	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return 0;

	rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
					buf, 1024);

	if (rc)
		goto out;

	sa = get_unaligned_be64(buf+12);

out:
	kfree(buf);
3051 | return sa; |
3052 | } |

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	c->device = dev;

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:	/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
	if (!rc) {
		/* incremented by sending the reset request */
		atomic_dec(&dev->commands_outstanding);
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->commands_outstanding) <= 0 ||
			lockup_detected(h));
	}

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (!rc)
		rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
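	/*
	 * Illustrative note (not in the original source): BMIC commands
	 * carry the 16-bit device index split across two CDB bytes, the
	 * low byte in CDB[2] and the high byte in CDB[9].  For example,
	 * bmic_device_index 0x0102 is encoded as CDB[2] = 0x02 and
	 * CDB[9] = 0x01.
	 */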

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle;
	u16 bmic_device_index = 0;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	encl_dev->eli =
		hpsa_get_enclosure_logical_identifier(h, scsi3addr);

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (encl_dev->target == -1 || encl_dev->lun == -1) {
		rc = IO_OK;
		goto out;
	}

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

static void hpsa_ext_ctrl_present(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev)
{
	u32 nphysicals;
	int i;

	if (h->discovery_polling)
		return;

	nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;

	for (i = 0; i < nphysicals; i++) {
		if (physdev->LUN[i].device_type ==
			BMIC_DEVICE_TYPE_CONTROLLER
			&& !is_hba_lunid(physdev->LUN[i].lunid)) {
			dev_info(&h->pdev->dev,
				"External controller present, activate discovery polling and disable rld caching\n");
			hpsa_disable_rld_caching(h);
			h->discovery_polling = 1;
			break;
		}
	}
}

/* Determine whether the device supports a given SCSI VPD page */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}

/*
 * Called during a scan operation.
 * Sets ioaccel status on the new device list, not the existing device list
 *
 * The device list used during I/O will be updated later in
 * adjust_hpsa_scsi_table.
 */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
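	/*
	 * Illustrative note (not in the original source): byte 4 of the
	 * ioaccel-status VPD page is a bit field.  For example, a value
	 * of 0x03 means the volume is both configured for offload (bit 0)
	 * and currently enabled for offload (bit 1), while 0x01 means
	 * configured but not presently enabled.
	 */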
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		bool offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		/*
		 * Check to see if offload can be enabled.
		 */
		if (offload_enabled) {
			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
			if (rc) /* could not load raid_map */
				goto out;
			this_device->offload_to_be_enabled = 1;
		}
	}

out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
					HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc; /* 0 - got id, otherwise, didn't */
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -EAGAIN;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -EIO;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			if (!h->legacy_board) {
				dev_err(&h->pdev->dev,
					"report luns requested format %u, got %u\n",
					extended_response,
					rld->extended_response_flag);
				rc = -EINVAL;
			} else
				rc = -EOPNOTSUPP;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}
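
/*
 * Illustrative note (not in the original source): the requested format
 * is carried in CDB[1], and the response echoes it back in the
 * extended_response_flag byte so the driver can detect firmware that
 * silently ignored the request.  The response itself starts with a
 * 4-byte big-endian LUNListLength followed by the flag byte and
 * padding, with LUN entries beginning at byte 8.
 */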

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	if (!rc || rc != -EOPNOTSUPP)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		int i;
		u32 nphys;

		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
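
/*
 * Illustrative note (not in the original source): the two-step read
 * above is the usual VPD pattern -- fetch just the header, take the
 * page-length byte at buf[3], then re-issue the inquiry for
 * length + header bytes.  For example, if buf[3] comes back as 0x3c,
 * the second inquiry asks for 0x3c + HPSA_VPD_HEADER_SZ bytes.
 */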

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)) < 0) {
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->bus, this_device->target,
			this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);
		rc = HPSA_LV_FAILED;
		goto bail_out;
	}

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
		    h->legacy_board) {
			/*
			 * Legacy boards might not support volume status
			 */
			dev_info(&h->pdev->dev,
				 "C0:T%d:L%d Volume status not available, assuming online.\n",
				 this_device->target, this_device->lun);
			volume_offline = 0;
		}
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		hpsa_turn_off_ioaccel_for_device(this_device);
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (this_device->external)
		this_device->queue_depth = EXTERNAL_QD;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}
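
/*
 * Illustrative example (not in the original source): for an external
 * logical volume the 32-bit little-endian lunid packs the target in
 * bits 16-29 and the lun in bits 0-7.  A lunid of 0x00050003 therefore
 * maps to target (0x00050003 >> 16) & 0x3fff = 5 and
 * lun 0x00050003 & 0xff = 3 on HPSA_EXTERNAL_RAID_VOLUME_BUS.
 */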

static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}
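
/*
 * Illustrative example (not in the original source): with
 * raid_ctlr_position == 0, nphysicals == 3 and nlocal_logicals == 2,
 * logicals_start is 4.  Index 4 or 5 is a local logical
 * (e.g. 4 - 3 - 1 = 0 < 2), while index 6 (6 - 3 - 1 = 2) is external.
 */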

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
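
/*
 * Illustrative note (not in the original source): the physical list is
 * requested in extended format (24-byte entries) and the logical list
 * in standard format (8-byte entries), hence the different divisors
 * above.  For example, a physical LUNListLength of 72 yields
 * 72 / 24 = 3 physical devices, while a logical LUNListLength of 16
 * yields 16 / 8 = 2 logical volumes.
 */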

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
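
/*
 * Illustrative example (not in the original source): with
 * raid_ctlr_position == 0, nphysicals == 2 and nlogicals == 2, the
 * combined index space enumerates as
 *   i = 0     -> RAID_CTLR_LUNID (the controller itself)
 *   i = 1, 2  -> physdev_list->LUN[0], LUN[1]
 *   i = 3, 4  -> logdev_list->LUN[0], LUN[1]
 * On boards where the controller is placed last instead, the
 * (raid_ctlr_position == 0) offsets all drop out.
 */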

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}
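
/*
 * Illustrative example (not in the original source): if the drive
 * reports a current_queue_depth_limit of 32, the exposed queue depth
 * becomes 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30, leaving two command
 * slots for firmware-internal operations; if the BMIC identify fails,
 * the conservative DRIVE_QUEUE_DEPTH of 7 is used instead.
 */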

static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 255)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
				id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}

static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
			lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
			id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}

#define RPL_DEV_FLAG_NON_DISK                           0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE  6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;
	/*
	 * Spares may be spun down; we do not want to do an Inquiry to a
	 * RAID set spare drive, as that would spin it up.  Spin-up is a
	 * performance hit because I/O to the RAID device stalls while it
	 * occurs, and it can take over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}

static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;

	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	hpsa_ext_ctrl_present(h, physdev_list);

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		memset(tmpdevice, 0, sizeof(*tmpdevice));

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (phys_dev_index >= 0 && !tmpdevice->external &&
			physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id, raid_map */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;


		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				 "Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
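
/*
 * Illustrative example (not in the original source): with
 * h->max_cmd_sg_entries == 32 and a command that maps to use_sg == 40
 * segments, the first list gets 31 descriptors plus one chain link,
 * the remaining 40 - 31 = 9 descriptors land in the chained block, and
 * SGTotal is reported as 40 + 1 to account for the chain entry.
 */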

static inline void warn_zero_length_transfer(struct ctlr_info *h,
						u8 *cdb, int cdb_len,
						const char *func)
{
	dev_warn(&h->pdev->dev,
		 "%s: Blocking zero-length request: CDB:%*phN\n",
		 func, cdb_len, cdb);
}

#define IO_ACCEL_INELIGIBLE 1
/* zero-length transfers trigger hardware errors. */
static bool is_zero_length_transfer(u8 *cdb)
{
	u32 block_cnt;

	/* Block zero-length transfer sizes on certain commands. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		block_cnt = get_unaligned_be16(&cdb[7]);
		break;
	case READ_12:
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		block_cnt = get_unaligned_be32(&cdb[6]);
		break;
	case READ_16:
	case WRITE_16:
	case VERIFY_16:		/* 0x8F */
		block_cnt = get_unaligned_be32(&cdb[10]);
		break;
	default:
		return false;
	}

	return block_cnt == 0;
}
4685 | |
4686 | static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) |
4687 | { |
4688 | int is_write = 0; |
4689 | u32 block; |
4690 | u32 block_cnt; |
4691 | |
4692 | /* Perform some CDB fixups if needed using 10 byte reads/writes only */ |
4693 | switch (cdb[0]) { |
4694 | case WRITE_6: |
4695 | case WRITE_12: |
4696 | is_write = 1; |
4697 | fallthrough; |
4698 | case READ_6: |
4699 | case READ_12: |
4700 | if (*cdb_len == 6) { |
4701 | block = (((cdb[1] & 0x1F) << 16) | |
4702 | (cdb[2] << 8) | |
4703 | cdb[3]); |
4704 | block_cnt = cdb[4]; |
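			/* In 6-byte CDBs a transfer length of 0 means 256 blocks. */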
4705 | if (block_cnt == 0) |
4706 | block_cnt = 256; |
4707 | } else { |
4708 | BUG_ON(*cdb_len != 12); |
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
4711 | } |
4712 | if (block_cnt > 0xffff) |
4713 | return IO_ACCEL_INELIGIBLE; |
4714 | |
4715 | cdb[0] = is_write ? WRITE_10 : READ_10; |
4716 | cdb[1] = 0; |
4717 | cdb[2] = (u8) (block >> 24); |
4718 | cdb[3] = (u8) (block >> 16); |
4719 | cdb[4] = (u8) (block >> 8); |
4720 | cdb[5] = (u8) (block); |
4721 | cdb[6] = 0; |
4722 | cdb[7] = (u8) (block_cnt >> 8); |
4723 | cdb[8] = (u8) (block_cnt); |
4724 | cdb[9] = 0; |
4725 | *cdb_len = 10; |
4726 | break; |
4727 | } |
4728 | return 0; |
4729 | } |
4730 | |
4731 | static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, |
4732 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
4733 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
4734 | { |
4735 | struct scsi_cmnd *cmd = c->scsi_cmd; |
4736 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; |
4737 | unsigned int len; |
4738 | unsigned int total_len = 0; |
4739 | struct scatterlist *sg; |
4740 | u64 addr64; |
4741 | int use_sg, i; |
4742 | struct SGDescriptor *curr_sg; |
4743 | u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; |
4744 | |
4745 | /* TODO: implement chaining support */ |
4746 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { |
		atomic_dec(&phys_disk->ioaccel_cmds_out);
4748 | return IO_ACCEL_INELIGIBLE; |
4749 | } |
4750 | |
4751 | BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); |
4752 | |
	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
4763 | |
4764 | c->cmd_type = CMD_IOACCEL1; |
4765 | |
4766 | /* Adjust the DMA address to point to the accelerated command buffer */ |
4767 | c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + |
4768 | (c->cmdindex * sizeof(*cp)); |
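	/* Accelerated commands must be 128-byte aligned (low 7 address bits clear). */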
4769 | BUG_ON(c->busaddr & 0x0000007F); |
4770 | |
4771 | use_sg = scsi_dma_map(cmd); |
4772 | if (use_sg < 0) { |
		atomic_dec(&phys_disk->ioaccel_cmds_out);
4774 | return use_sg; |
4775 | } |
4776 | |
4777 | if (use_sg) { |
4778 | curr_sg = cp->SG; |
4779 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
4780 | addr64 = (u64) sg_dma_address(sg); |
4781 | len = sg_dma_len(sg); |
4782 | total_len += len; |
4783 | curr_sg->Addr = cpu_to_le64(addr64); |
4784 | curr_sg->Len = cpu_to_le32(len); |
4785 | curr_sg->Ext = cpu_to_le32(0); |
4786 | curr_sg++; |
4787 | } |
4788 | (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); |
4789 | |
4790 | switch (cmd->sc_data_direction) { |
4791 | case DMA_TO_DEVICE: |
4792 | control |= IOACCEL1_CONTROL_DATA_OUT; |
4793 | break; |
4794 | case DMA_FROM_DEVICE: |
4795 | control |= IOACCEL1_CONTROL_DATA_IN; |
4796 | break; |
4797 | case DMA_NONE: |
4798 | control |= IOACCEL1_CONTROL_NODATAXFER; |
4799 | break; |
4800 | default: |
4801 | dev_err(&h->pdev->dev, "unknown data direction: %d\n" , |
4802 | cmd->sc_data_direction); |
4803 | BUG(); |
4804 | break; |
4805 | } |
4806 | } else { |
4807 | control |= IOACCEL1_CONTROL_NODATAXFER; |
4808 | } |
4809 | |
4810 | c->Header.SGList = use_sg; |
4811 | /* Fill out the command structure to submit */ |
4812 | cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); |
4813 | cp->transfer_len = cpu_to_le32(total_len); |
4814 | cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | |
4815 | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); |
4816 | cp->control = cpu_to_le32(control); |
4817 | memcpy(cp->CDB, cdb, cdb_len); |
4818 | memcpy(cp->CISS_LUN, scsi3addr, 8); |
4819 | /* Tag was already set at init time. */ |
4820 | enqueue_cmd_and_start_io(h, c); |
4821 | return 0; |
4822 | } |
4823 | |
4824 | /* |
4825 | * Queue a command directly to a device behind the controller using the |
4826 | * I/O accelerator path. |
4827 | */ |
4828 | static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, |
4829 | struct CommandList *c) |
4830 | { |
4831 | struct scsi_cmnd *cmd = c->scsi_cmd; |
4832 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
4833 | |
4834 | if (!dev) |
4835 | return -1; |
4836 | |
4837 | c->phys_disk = dev; |
4838 | |
4839 | if (dev->in_reset) |
4840 | return -1; |
4841 | |
	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4844 | } |
4845 | |
4846 | /* |
4847 | * Set encryption parameters for the ioaccel2 request |
4848 | */ |
4849 | static void set_encrypt_ioaccel2(struct ctlr_info *h, |
4850 | struct CommandList *c, struct io_accel2_cmd *cp) |
4851 | { |
4852 | struct scsi_cmnd *cmd = c->scsi_cmd; |
4853 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
4854 | struct raid_map_data *map = &dev->raid_map; |
4855 | u64 first_block; |
4856 | |
	/* Are we doing encryption on this device? */
4858 | if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) |
4859 | return; |
4860 | /* Set the data encryption key index. */ |
4861 | cp->dekindex = map->dekindex; |
4862 | |
4863 | /* Set the encryption enable flag, encoded into direction field. */ |
4864 | cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; |
4865 | |
	/* Set encryption tweak values based on logical block address.
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
4870 | switch (cmd->cmnd[0]) { |
4871 | /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ |
4872 | case READ_6: |
4873 | case WRITE_6: |
4874 | first_block = (((cmd->cmnd[1] & 0x1F) << 16) | |
4875 | (cmd->cmnd[2] << 8) | |
4876 | cmd->cmnd[3]); |
4877 | break; |
4878 | case WRITE_10: |
4879 | case READ_10: |
4880 | /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ |
4881 | case WRITE_12: |
4882 | case READ_12: |
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
4884 | break; |
4885 | case WRITE_16: |
4886 | case READ_16: |
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
4888 | break; |
4889 | default: |
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
4893 | BUG(); |
4894 | break; |
4895 | } |
4896 | |
4897 | if (le32_to_cpu(map->volume_blk_size) != 512) |
4898 | first_block = first_block * |
4899 | le32_to_cpu(map->volume_blk_size)/512; |
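	/* E.g. with a 4096-byte volume block size the tweak advances by 8 per logical block. */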
4900 | |
4901 | cp->tweak_lower = cpu_to_le32(first_block); |
4902 | cp->tweak_upper = cpu_to_le32(first_block >> 32); |
4903 | } |
4904 | |
4905 | static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, |
4906 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
4907 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
4908 | { |
4909 | struct scsi_cmnd *cmd = c->scsi_cmd; |
4910 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; |
4911 | struct ioaccel2_sg_element *curr_sg; |
4912 | int use_sg, i; |
4913 | struct scatterlist *sg; |
4914 | u64 addr64; |
4915 | u32 len; |
4916 | u32 total_len = 0; |
4917 | |
4918 | if (!cmd->device) |
4919 | return -1; |
4920 | |
4921 | if (!cmd->device->hostdata) |
4922 | return -1; |
4923 | |
4924 | BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); |
4925 | |
	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
4936 | |
4937 | c->cmd_type = CMD_IOACCEL2; |
4938 | /* Adjust the DMA address to point to the accelerated command buffer */ |
4939 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + |
4940 | (c->cmdindex * sizeof(*cp)); |
4941 | BUG_ON(c->busaddr & 0x0000007F); |
4942 | |
4943 | memset(cp, 0, sizeof(*cp)); |
4944 | cp->IU_type = IOACCEL2_IU_TYPE; |
4945 | |
4946 | use_sg = scsi_dma_map(cmd); |
4947 | if (use_sg < 0) { |
		atomic_dec(&phys_disk->ioaccel_cmds_out);
4949 | return use_sg; |
4950 | } |
4951 | |
4952 | if (use_sg) { |
4953 | curr_sg = cp->sg; |
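		/*
		 * If the request needs more SG entries than fit in the
		 * command itself, turn the first embedded element into a
		 * chain descriptor pointing at the preallocated chain
		 * block and place all data descriptors there instead.
		 */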
4954 | if (use_sg > h->ioaccel_maxsg) { |
4955 | addr64 = le64_to_cpu( |
4956 | h->ioaccel2_cmd_sg_list[c->cmdindex]->address); |
4957 | curr_sg->address = cpu_to_le64(addr64); |
4958 | curr_sg->length = 0; |
4959 | curr_sg->reserved[0] = 0; |
4960 | curr_sg->reserved[1] = 0; |
4961 | curr_sg->reserved[2] = 0; |
4962 | curr_sg->chain_indicator = IOACCEL2_CHAIN; |
4963 | |
4964 | curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; |
4965 | } |
4966 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
4967 | addr64 = (u64) sg_dma_address(sg); |
4968 | len = sg_dma_len(sg); |
4969 | total_len += len; |
4970 | curr_sg->address = cpu_to_le64(addr64); |
4971 | curr_sg->length = cpu_to_le32(len); |
4972 | curr_sg->reserved[0] = 0; |
4973 | curr_sg->reserved[1] = 0; |
4974 | curr_sg->reserved[2] = 0; |
4975 | curr_sg->chain_indicator = 0; |
4976 | curr_sg++; |
4977 | } |
4978 | |
4979 | /* |
4980 | * Set the last s/g element bit |
4981 | */ |
4982 | (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; |
4983 | |
4984 | switch (cmd->sc_data_direction) { |
4985 | case DMA_TO_DEVICE: |
4986 | cp->direction &= ~IOACCEL2_DIRECTION_MASK; |
4987 | cp->direction |= IOACCEL2_DIR_DATA_OUT; |
4988 | break; |
4989 | case DMA_FROM_DEVICE: |
4990 | cp->direction &= ~IOACCEL2_DIRECTION_MASK; |
4991 | cp->direction |= IOACCEL2_DIR_DATA_IN; |
4992 | break; |
4993 | case DMA_NONE: |
4994 | cp->direction &= ~IOACCEL2_DIRECTION_MASK; |
4995 | cp->direction |= IOACCEL2_DIR_NO_DATA; |
4996 | break; |
4997 | default: |
4998 | dev_err(&h->pdev->dev, "unknown data direction: %d\n" , |
4999 | cmd->sc_data_direction); |
5000 | BUG(); |
5001 | break; |
5002 | } |
5003 | } else { |
5004 | cp->direction &= ~IOACCEL2_DIRECTION_MASK; |
5005 | cp->direction |= IOACCEL2_DIR_NO_DATA; |
5006 | } |
5007 | |
5008 | /* Set encryption parameters, if necessary */ |
5009 | set_encrypt_ioaccel2(h, c, cp); |
5010 | |
5011 | cp->scsi_nexus = cpu_to_le32(ioaccel_handle); |
5012 | cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); |
5013 | memcpy(cp->cdb, cdb, sizeof(cp->cdb)); |
5014 | |
5015 | cp->data_len = cpu_to_le32(total_len); |
5016 | cp->err_ptr = cpu_to_le64(c->busaddr + |
5017 | offsetof(struct io_accel2_cmd, error_data)); |
5018 | cp->err_len = cpu_to_le32(sizeof(cp->error_data)); |
5019 | |
5020 | /* fill in sg elements */ |
5021 | if (use_sg > h->ioaccel_maxsg) { |
5022 | cp->sg_count = 1; |
5023 | cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0])); |
5024 | if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { |
			atomic_dec(&phys_disk->ioaccel_cmds_out);
5026 | scsi_dma_unmap(cmd); |
5027 | return -1; |
5028 | } |
5029 | } else |
5030 | cp->sg_count = (u8) use_sg; |
5031 | |
5032 | if (phys_disk->in_reset) { |
5033 | cmd->result = DID_RESET << 16; |
5034 | return -1; |
5035 | } |
5036 | |
5037 | enqueue_cmd_and_start_io(h, c); |
5038 | return 0; |
5039 | } |
5040 | |
5041 | /* |
5042 | * Queue a command to the correct I/O accelerator path. |
5043 | */ |
5044 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
5045 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
5046 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
5047 | { |
5048 | if (!c->scsi_cmd->device) |
5049 | return -1; |
5050 | |
5051 | if (!c->scsi_cmd->device->hostdata) |
5052 | return -1; |
5053 | |
5054 | if (phys_disk->in_reset) |
5055 | return -1; |
5056 | |
5057 | /* Try to honor the device's queue depth */ |
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
			phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
5061 | return IO_ACCEL_INELIGIBLE; |
5062 | } |
5063 | if (h->transMethod & CFGTBL_Trans_io_accel1) |
5064 | return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, |
5065 | cdb, cdb_len, scsi3addr, |
5066 | phys_disk); |
5067 | else |
5068 | return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, |
5069 | cdb, cdb_len, scsi3addr, |
5070 | phys_disk); |
5071 | } |
5072 | |
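/*
 * Adjust *map_index so that it references a disk in the mirror group
 * selected by offload_to_mirror, walking group by group until the
 * requested group is reached.
 */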
5073 | static void raid_map_helper(struct raid_map_data *map, |
5074 | int offload_to_mirror, u32 *map_index, u32 *current_group) |
5075 | { |
5076 | if (offload_to_mirror == 0) { |
5077 | /* use physical disk in the first mirrored group. */ |
5078 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
5079 | return; |
5080 | } |
5081 | do { |
5082 | /* determine mirror group that *map_index indicates */ |
5083 | *current_group = *map_index / |
5084 | le16_to_cpu(map->data_disks_per_row); |
5085 | if (offload_to_mirror == *current_group) |
5086 | continue; |
5087 | if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { |
5088 | /* select map index from next group */ |
5089 | *map_index += le16_to_cpu(map->data_disks_per_row); |
5090 | (*current_group)++; |
5091 | } else { |
5092 | /* select map index from first group */ |
5093 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
5094 | *current_group = 0; |
5095 | } |
5096 | } while (offload_to_mirror != *current_group); |
5097 | } |
5098 | |
5099 | /* |
5100 | * Attempt to perform offload RAID mapping for a logical volume I/O. |
5101 | */ |
5102 | static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, |
5103 | struct CommandList *c) |
5104 | { |
5105 | struct scsi_cmnd *cmd = c->scsi_cmd; |
5106 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
5107 | struct raid_map_data *map = &dev->raid_map; |
5108 | struct raid_map_disk_data *dd = &map->data[0]; |
5109 | int is_write = 0; |
5110 | u32 map_index; |
5111 | u64 first_block, last_block; |
5112 | u32 block_cnt; |
5113 | u32 blocks_per_row; |
5114 | u64 first_row, last_row; |
5115 | u32 first_row_offset, last_row_offset; |
5116 | u32 first_column, last_column; |
5117 | u64 r0_first_row, r0_last_row; |
5118 | u32 r5or6_blocks_per_row; |
5119 | u64 r5or6_first_row, r5or6_last_row; |
5120 | u32 r5or6_first_row_offset, r5or6_last_row_offset; |
5121 | u32 r5or6_first_column, r5or6_last_column; |
5122 | u32 total_disks_per_row; |
5123 | u32 stripesize; |
5124 | u32 first_group, last_group, current_group; |
5125 | u32 map_row; |
5126 | u32 disk_handle; |
5127 | u64 disk_block; |
5128 | u32 disk_block_cnt; |
5129 | u8 cdb[16]; |
5130 | u8 cdb_len; |
5131 | u16 strip_size; |
5132 | #if BITS_PER_LONG == 32 |
5133 | u64 tmpdiv; |
5134 | #endif |
5135 | int offload_to_mirror; |
5136 | |
5137 | if (!dev) |
5138 | return -1; |
5139 | |
5140 | if (dev->in_reset) |
5141 | return -1; |
5142 | |
5143 | /* check for valid opcode, get LBA and block count */ |
5144 | switch (cmd->cmnd[0]) { |
5145 | case WRITE_6: |
5146 | is_write = 1; |
5147 | fallthrough; |
5148 | case READ_6: |
5149 | first_block = (((cmd->cmnd[1] & 0x1F) << 16) | |
5150 | (cmd->cmnd[2] << 8) | |
5151 | cmd->cmnd[3]); |
5152 | block_cnt = cmd->cmnd[4]; |
5153 | if (block_cnt == 0) |
5154 | block_cnt = 256; |
5155 | break; |
5156 | case WRITE_10: |
5157 | is_write = 1; |
5158 | fallthrough; |
5159 | case READ_10: |
5160 | first_block = |
5161 | (((u64) cmd->cmnd[2]) << 24) | |
5162 | (((u64) cmd->cmnd[3]) << 16) | |
5163 | (((u64) cmd->cmnd[4]) << 8) | |
5164 | cmd->cmnd[5]; |
5165 | block_cnt = |
5166 | (((u32) cmd->cmnd[7]) << 8) | |
5167 | cmd->cmnd[8]; |
5168 | break; |
5169 | case WRITE_12: |
5170 | is_write = 1; |
5171 | fallthrough; |
5172 | case READ_12: |
5173 | first_block = |
5174 | (((u64) cmd->cmnd[2]) << 24) | |
5175 | (((u64) cmd->cmnd[3]) << 16) | |
5176 | (((u64) cmd->cmnd[4]) << 8) | |
5177 | cmd->cmnd[5]; |
5178 | block_cnt = |
5179 | (((u32) cmd->cmnd[6]) << 24) | |
5180 | (((u32) cmd->cmnd[7]) << 16) | |
5181 | (((u32) cmd->cmnd[8]) << 8) | |
5182 | cmd->cmnd[9]; |
5183 | break; |
5184 | case WRITE_16: |
5185 | is_write = 1; |
5186 | fallthrough; |
5187 | case READ_16: |
5188 | first_block = |
5189 | (((u64) cmd->cmnd[2]) << 56) | |
5190 | (((u64) cmd->cmnd[3]) << 48) | |
5191 | (((u64) cmd->cmnd[4]) << 40) | |
5192 | (((u64) cmd->cmnd[5]) << 32) | |
5193 | (((u64) cmd->cmnd[6]) << 24) | |
5194 | (((u64) cmd->cmnd[7]) << 16) | |
5195 | (((u64) cmd->cmnd[8]) << 8) | |
5196 | cmd->cmnd[9]; |
5197 | block_cnt = |
5198 | (((u32) cmd->cmnd[10]) << 24) | |
5199 | (((u32) cmd->cmnd[11]) << 16) | |
5200 | (((u32) cmd->cmnd[12]) << 8) | |
5201 | cmd->cmnd[13]; |
5202 | break; |
5203 | default: |
5204 | return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ |
5205 | } |
5206 | last_block = first_block + block_cnt - 1; |
5207 | |
5208 | /* check for write to non-RAID-0 */ |
5209 | if (is_write && dev->raid_level != 0) |
5210 | return IO_ACCEL_INELIGIBLE; |
5211 | |
5212 | /* check for invalid block or wraparound */ |
5213 | if (last_block >= le64_to_cpu(map->volume_blk_cnt) || |
5214 | last_block < first_block) |
5215 | return IO_ACCEL_INELIGIBLE; |
5216 | |
5217 | /* calculate stripe information for the request */ |
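	/*
	 * E.g. (illustrative numbers): with 3 data disks and a 128-block
	 * strip, blocks_per_row = 384, so LBA 300 maps to row 0 (300 / 384),
	 * row offset 300, and column 2 (300 / 128).
	 */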
5218 | blocks_per_row = le16_to_cpu(map->data_disks_per_row) * |
5219 | le16_to_cpu(map->strip_size); |
5220 | strip_size = le16_to_cpu(map->strip_size); |
5221 | #if BITS_PER_LONG == 32 |
5222 | tmpdiv = first_block; |
5223 | (void) do_div(tmpdiv, blocks_per_row); |
5224 | first_row = tmpdiv; |
5225 | tmpdiv = last_block; |
5226 | (void) do_div(tmpdiv, blocks_per_row); |
5227 | last_row = tmpdiv; |
5228 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
5229 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
5230 | tmpdiv = first_row_offset; |
5231 | (void) do_div(tmpdiv, strip_size); |
5232 | first_column = tmpdiv; |
5233 | tmpdiv = last_row_offset; |
5234 | (void) do_div(tmpdiv, strip_size); |
5235 | last_column = tmpdiv; |
5236 | #else |
5237 | first_row = first_block / blocks_per_row; |
5238 | last_row = last_block / blocks_per_row; |
5239 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
5240 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
5241 | first_column = first_row_offset / strip_size; |
5242 | last_column = last_row_offset / strip_size; |
5243 | #endif |
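	/*
	 * (The 32-bit path uses do_div() because the kernel does not link
	 * the compiler's 64-bit division helpers.)
	 */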
5244 | |
5245 | /* if this isn't a single row/column then give to the controller */ |
5246 | if ((first_row != last_row) || (first_column != last_column)) |
5247 | return IO_ACCEL_INELIGIBLE; |
5248 | |
5249 | /* proceeding with driver mapping */ |
5250 | total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + |
5251 | le16_to_cpu(map->metadata_disks_per_row); |
5252 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
5253 | le16_to_cpu(map->row_cnt); |
5254 | map_index = (map_row * total_disks_per_row) + first_column; |
5255 | |
5256 | switch (dev->raid_level) { |
5257 | case HPSA_RAID_0: |
5258 | break; /* nothing special to do */ |
5259 | case HPSA_RAID_1: |
5260 | /* Handles load balance across RAID 1 members. |
5261 | * (2-drive R1 and R10 with even # of drives.) |
5262 | * Appropriate for SSDs, not optimal for HDDs |
5263 | * Ensure we have the correct raid_map. |
5264 | */ |
5265 | if (le16_to_cpu(map->layout_map_count) != 2) { |
			hpsa_turn_off_ioaccel_for_device(dev);
5267 | return IO_ACCEL_INELIGIBLE; |
5268 | } |
5269 | if (dev->offload_to_mirror) |
5270 | map_index += le16_to_cpu(map->data_disks_per_row); |
5271 | dev->offload_to_mirror = !dev->offload_to_mirror; |
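		/* Toggling means successive I/Os alternate between the two mirror groups. */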
5272 | break; |
5273 | case HPSA_RAID_ADM: |
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
5276 | * Ensure we have the correct raid_map. |
5277 | */ |
5278 | if (le16_to_cpu(map->layout_map_count) != 3) { |
			hpsa_turn_off_ioaccel_for_device(dev);
5280 | return IO_ACCEL_INELIGIBLE; |
5281 | } |
5282 | |
5283 | offload_to_mirror = dev->offload_to_mirror; |
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
5286 | /* set mirror group to use next time */ |
5287 | offload_to_mirror = |
5288 | (offload_to_mirror >= |
5289 | le16_to_cpu(map->layout_map_count) - 1) |
5290 | ? 0 : offload_to_mirror + 1; |
5291 | dev->offload_to_mirror = offload_to_mirror; |
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of map->layout_map_count - 1.
		 */
5296 | break; |
5297 | case HPSA_RAID_5: |
5298 | case HPSA_RAID_6: |
5299 | if (le16_to_cpu(map->layout_map_count) <= 1) |
5300 | break; |
5301 | |
5302 | /* Verify first and last block are in same RAID group */ |
5303 | r5or6_blocks_per_row = |
5304 | le16_to_cpu(map->strip_size) * |
5305 | le16_to_cpu(map->data_disks_per_row); |
5306 | if (r5or6_blocks_per_row == 0) { |
			hpsa_turn_off_ioaccel_for_device(dev);
5308 | return IO_ACCEL_INELIGIBLE; |
5309 | } |
5310 | stripesize = r5or6_blocks_per_row * |
5311 | le16_to_cpu(map->layout_map_count); |
5312 | #if BITS_PER_LONG == 32 |
5313 | tmpdiv = first_block; |
5314 | first_group = do_div(tmpdiv, stripesize); |
5315 | tmpdiv = first_group; |
5316 | (void) do_div(tmpdiv, r5or6_blocks_per_row); |
5317 | first_group = tmpdiv; |
5318 | tmpdiv = last_block; |
5319 | last_group = do_div(tmpdiv, stripesize); |
5320 | tmpdiv = last_group; |
5321 | (void) do_div(tmpdiv, r5or6_blocks_per_row); |
5322 | last_group = tmpdiv; |
5323 | #else |
5324 | first_group = (first_block % stripesize) / r5or6_blocks_per_row; |
5325 | last_group = (last_block % stripesize) / r5or6_blocks_per_row; |
5326 | #endif |
5327 | if (first_group != last_group) |
5328 | return IO_ACCEL_INELIGIBLE; |
5329 | |
5330 | /* Verify request is in a single row of RAID 5/6 */ |
5331 | #if BITS_PER_LONG == 32 |
5332 | tmpdiv = first_block; |
5333 | (void) do_div(tmpdiv, stripesize); |
5334 | first_row = r5or6_first_row = r0_first_row = tmpdiv; |
5335 | tmpdiv = last_block; |
5336 | (void) do_div(tmpdiv, stripesize); |
5337 | r5or6_last_row = r0_last_row = tmpdiv; |
5338 | #else |
5339 | first_row = r5or6_first_row = r0_first_row = |
5340 | first_block / stripesize; |
5341 | r5or6_last_row = r0_last_row = last_block / stripesize; |
5342 | #endif |
5343 | if (r5or6_first_row != r5or6_last_row) |
5344 | return IO_ACCEL_INELIGIBLE; |
5345 | |
5346 | |
5347 | /* Verify request is in a single column */ |
5348 | #if BITS_PER_LONG == 32 |
5349 | tmpdiv = first_block; |
5350 | first_row_offset = do_div(tmpdiv, stripesize); |
5351 | tmpdiv = first_row_offset; |
5352 | first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); |
5353 | r5or6_first_row_offset = first_row_offset; |
5354 | tmpdiv = last_block; |
5355 | r5or6_last_row_offset = do_div(tmpdiv, stripesize); |
5356 | tmpdiv = r5or6_last_row_offset; |
5357 | r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); |
5358 | tmpdiv = r5or6_first_row_offset; |
5359 | (void) do_div(tmpdiv, map->strip_size); |
5360 | first_column = r5or6_first_column = tmpdiv; |
5361 | tmpdiv = r5or6_last_row_offset; |
5362 | (void) do_div(tmpdiv, map->strip_size); |
5363 | r5or6_last_column = tmpdiv; |
5364 | #else |
5365 | first_row_offset = r5or6_first_row_offset = |
5366 | (u32)((first_block % stripesize) % |
5367 | r5or6_blocks_per_row); |
5368 | |
5369 | r5or6_last_row_offset = |
5370 | (u32)((last_block % stripesize) % |
5371 | r5or6_blocks_per_row); |
5372 | |
5373 | first_column = r5or6_first_column = |
5374 | r5or6_first_row_offset / le16_to_cpu(map->strip_size); |
5375 | r5or6_last_column = |
5376 | r5or6_last_row_offset / le16_to_cpu(map->strip_size); |
5377 | #endif |
5378 | if (r5or6_first_column != r5or6_last_column) |
5379 | return IO_ACCEL_INELIGIBLE; |
5380 | |
5381 | /* Request is eligible */ |
5382 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
5383 | le16_to_cpu(map->row_cnt); |
5384 | |
5385 | map_index = (first_group * |
5386 | (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + |
5387 | (map_row * total_disks_per_row) + first_column; |
5388 | break; |
5389 | default: |
5390 | return IO_ACCEL_INELIGIBLE; |
5391 | } |
5392 | |
5393 | if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) |
5394 | return IO_ACCEL_INELIGIBLE; |
5395 | |
5396 | c->phys_disk = dev->phys_disk[map_index]; |
5397 | if (!c->phys_disk) |
5398 | return IO_ACCEL_INELIGIBLE; |
5399 | |
5400 | disk_handle = dd[map_index].ioaccel_handle; |
5401 | disk_block = le64_to_cpu(map->disk_starting_blk) + |
5402 | first_row * le16_to_cpu(map->strip_size) + |
5403 | (first_row_offset - first_column * |
5404 | le16_to_cpu(map->strip_size)); |
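	/* (first_row_offset - first_column * strip_size) is the offset into the target strip. */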
5405 | disk_block_cnt = block_cnt; |
5406 | |
5407 | /* handle differing logical/physical block sizes */ |
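	/* E.g. 4096-byte volume blocks on 512-byte-sector drives give phys_blk_shift == 3 (scale by 8). */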
5408 | if (map->phys_blk_shift) { |
5409 | disk_block <<= map->phys_blk_shift; |
5410 | disk_block_cnt <<= map->phys_blk_shift; |
5411 | } |
5412 | BUG_ON(disk_block_cnt > 0xffff); |
5413 | |
5414 | /* build the new CDB for the physical disk I/O */ |
5415 | if (disk_block > 0xffffffff) { |
5416 | cdb[0] = is_write ? WRITE_16 : READ_16; |
5417 | cdb[1] = 0; |
5418 | cdb[2] = (u8) (disk_block >> 56); |
5419 | cdb[3] = (u8) (disk_block >> 48); |
5420 | cdb[4] = (u8) (disk_block >> 40); |
5421 | cdb[5] = (u8) (disk_block >> 32); |
5422 | cdb[6] = (u8) (disk_block >> 24); |
5423 | cdb[7] = (u8) (disk_block >> 16); |
5424 | cdb[8] = (u8) (disk_block >> 8); |
5425 | cdb[9] = (u8) (disk_block); |
5426 | cdb[10] = (u8) (disk_block_cnt >> 24); |
5427 | cdb[11] = (u8) (disk_block_cnt >> 16); |
5428 | cdb[12] = (u8) (disk_block_cnt >> 8); |
5429 | cdb[13] = (u8) (disk_block_cnt); |
5430 | cdb[14] = 0; |
5431 | cdb[15] = 0; |
5432 | cdb_len = 16; |
5433 | } else { |
5434 | cdb[0] = is_write ? WRITE_10 : READ_10; |
5435 | cdb[1] = 0; |
5436 | cdb[2] = (u8) (disk_block >> 24); |
5437 | cdb[3] = (u8) (disk_block >> 16); |
5438 | cdb[4] = (u8) (disk_block >> 8); |
5439 | cdb[5] = (u8) (disk_block); |
5440 | cdb[6] = 0; |
5441 | cdb[7] = (u8) (disk_block_cnt >> 8); |
5442 | cdb[8] = (u8) (disk_block_cnt); |
5443 | cdb[9] = 0; |
5444 | cdb_len = 10; |
5445 | } |
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
5449 | } |
5450 | |
5451 | /* |
5452 | * Submit commands down the "normal" RAID stack path |
5453 | * All callers to hpsa_ciss_submit must check lockup_detected |
5454 | * beforehand, before (opt.) and after calling cmd_alloc |
5455 | */ |
5456 | static int hpsa_ciss_submit(struct ctlr_info *h, |
5457 | struct CommandList *c, struct scsi_cmnd *cmd, |
5458 | struct hpsa_scsi_dev_t *dev) |
5459 | { |
5460 | cmd->host_scribble = (unsigned char *) c; |
5461 | c->cmd_type = CMD_SCSI; |
5462 | c->scsi_cmd = cmd; |
5463 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
5464 | memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); |
5465 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); |
5466 | |
5467 | /* Fill in the request block... */ |
5468 | |
5469 | c->Request.Timeout = 0; |
5470 | BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); |
5471 | c->Request.CDBLen = cmd->cmd_len; |
5472 | memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); |
5473 | switch (cmd->sc_data_direction) { |
5474 | case DMA_TO_DEVICE: |
5475 | c->Request.type_attr_dir = |
5476 | TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); |
5477 | break; |
5478 | case DMA_FROM_DEVICE: |
5479 | c->Request.type_attr_dir = |
5480 | TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); |
5481 | break; |
5482 | case DMA_NONE: |
5483 | c->Request.type_attr_dir = |
5484 | TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); |
5485 | break; |
5486 | case DMA_BIDIRECTIONAL: |
5487 | /* This can happen if a buggy application does a scsi passthru |
5488 | * and sets both inlen and outlen to non-zero. ( see |
5489 | * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) |
5490 | */ |
5491 | |
5492 | c->Request.type_attr_dir = |
5493 | TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); |
5494 | /* This is technically wrong, and hpsa controllers should |
5495 | * reject it with CMD_INVALID, which is the most correct |
5496 | * response, but non-fibre backends appear to let it |
5497 | * slide by, and give the same results as if this field |
5498 | * were set correctly. Either way is acceptable for |
5499 | * our purposes here. |
5500 | */ |
5501 | |
5502 | break; |
5503 | |
5504 | default: |
5505 | dev_err(&h->pdev->dev, "unknown data direction: %d\n" , |
5506 | cmd->sc_data_direction); |
5507 | BUG(); |
5508 | break; |
5509 | } |
5510 | |
	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5512 | hpsa_cmd_resolve_and_free(h, c); |
5513 | return SCSI_MLQUEUE_HOST_BUSY; |
5514 | } |
5515 | |
5516 | if (dev->in_reset) { |
5517 | hpsa_cmd_resolve_and_free(h, c); |
5518 | return SCSI_MLQUEUE_HOST_BUSY; |
5519 | } |
5520 | |
5521 | c->device = dev; |
5522 | |
5523 | enqueue_cmd_and_start_io(h, c); |
5524 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
5525 | return 0; |
5526 | } |
5527 | |
5528 | static void hpsa_cmd_init(struct ctlr_info *h, int index, |
5529 | struct CommandList *c) |
5530 | { |
5531 | dma_addr_t cmd_dma_handle, err_dma_handle; |
5532 | |
	/* Zero out all of the CommandList except the last field, refcount */
5534 | memset(c, 0, offsetof(struct CommandList, refcount)); |
5535 | c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); |
5536 | cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); |
5537 | c->err_info = h->errinfo_pool + index; |
5538 | memset(c->err_info, 0, sizeof(*c->err_info)); |
5539 | err_dma_handle = h->errinfo_pool_dhandle |
5540 | + index * sizeof(*c->err_info); |
5541 | c->cmdindex = index; |
5542 | c->busaddr = (u32) cmd_dma_handle; |
5543 | c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); |
5544 | c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); |
5545 | c->h = h; |
5546 | c->scsi_cmd = SCSI_CMD_IDLE; |
5547 | } |
5548 | |
5549 | static void hpsa_preinitialize_commands(struct ctlr_info *h) |
5550 | { |
5551 | int i; |
5552 | |
5553 | for (i = 0; i < h->nr_cmds; i++) { |
5554 | struct CommandList *c = h->cmd_pool + i; |
5555 | |
		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
5558 | } |
5559 | } |
5560 | |
5561 | static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, |
5562 | struct CommandList *c) |
5563 | { |
5564 | dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); |
5565 | |
5566 | BUG_ON(c->cmdindex != index); |
5567 | |
5568 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); |
5569 | memset(c->err_info, 0, sizeof(*c->err_info)); |
5570 | c->busaddr = (u32) cmd_dma_handle; |
5571 | } |
5572 | |
5573 | static int hpsa_ioaccel_submit(struct ctlr_info *h, |
5574 | struct CommandList *c, struct scsi_cmnd *cmd, |
5575 | bool retry) |
5576 | { |
5577 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
5578 | int rc = IO_ACCEL_INELIGIBLE; |
5579 | |
5580 | if (!dev) |
5581 | return SCSI_MLQUEUE_HOST_BUSY; |
5582 | |
5583 | if (dev->in_reset) |
5584 | return SCSI_MLQUEUE_HOST_BUSY; |
5585 | |
5586 | if (hpsa_simple_mode) |
5587 | return IO_ACCEL_INELIGIBLE; |
5588 | |
5589 | cmd->host_scribble = (unsigned char *) c; |
5590 | |
5591 | if (dev->offload_enabled) { |
		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5593 | c->cmd_type = CMD_SCSI; |
5594 | c->scsi_cmd = cmd; |
5595 | c->device = dev; |
5596 | if (retry) /* Resubmit but do not increment device->commands_outstanding. */ |
5597 | c->retry_pending = true; |
5598 | rc = hpsa_scsi_ioaccel_raid_map(h, c); |
5599 | if (rc < 0) /* scsi_dma_map failed. */ |
5600 | rc = SCSI_MLQUEUE_HOST_BUSY; |
5601 | } else if (dev->hba_ioaccel_enabled) { |
		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
5603 | c->cmd_type = CMD_SCSI; |
5604 | c->scsi_cmd = cmd; |
5605 | c->device = dev; |
5606 | if (retry) /* Resubmit but do not increment device->commands_outstanding. */ |
5607 | c->retry_pending = true; |
5608 | rc = hpsa_scsi_ioaccel_direct_map(h, c); |
5609 | if (rc < 0) /* scsi_dma_map failed. */ |
5610 | rc = SCSI_MLQUEUE_HOST_BUSY; |
5611 | } |
5612 | return rc; |
5613 | } |
5614 | |
5615 | static void hpsa_command_resubmit_worker(struct work_struct *work) |
5616 | { |
5617 | struct scsi_cmnd *cmd; |
5618 | struct hpsa_scsi_dev_t *dev; |
5619 | struct CommandList *c = container_of(work, struct CommandList, work); |
5620 | |
5621 | cmd = c->scsi_cmd; |
5622 | dev = cmd->device->hostdata; |
5623 | if (!dev) { |
5624 | cmd->result = DID_NO_CONNECT << 16; |
		return hpsa_cmd_free_and_done(c->h, c, cmd);
5626 | } |
5627 | |
5628 | if (dev->in_reset) { |
5629 | cmd->result = DID_RESET << 16; |
		return hpsa_cmd_free_and_done(c->h, c, cmd);
5631 | } |
5632 | |
5633 | if (c->cmd_type == CMD_IOACCEL2) { |
5634 | struct ctlr_info *h = c->h; |
5635 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
5636 | int rc; |
5637 | |
5638 | if (c2->error_data.serv_response == |
5639 | IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { |
5640 | /* Resubmit with the retry_pending flag set. */ |
			rc = hpsa_ioaccel_submit(h, c, cmd, true);
5642 | if (rc == 0) |
5643 | return; |
5644 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
5645 | /* |
5646 | * If we get here, it means dma mapping failed. |
5647 | * Try again via scsi mid layer, which will |
5648 | * then get SCSI_MLQUEUE_HOST_BUSY. |
5649 | */ |
5650 | cmd->result = DID_IMM_RETRY << 16; |
5651 | return hpsa_cmd_free_and_done(h, c, cmd); |
5652 | } |
5653 | /* else, fall thru and resubmit down CISS path */ |
5654 | } |
5655 | } |
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	/*
	 * We did not come in through queue_command, so we can set
	 * retry_pending to true for a driver-initiated retry attempt
	 * (i.e. not a SML retry).
	 * Note: hpsa_ciss_submit does not zero out the command fields like
	 * ioaccel submit does.
	 */
5665 | c->retry_pending = true; |
	if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5667 | /* |
5668 | * If we get here, it means dma mapping failed. Try |
5669 | * again via scsi mid layer, which will then get |
5670 | * SCSI_MLQUEUE_HOST_BUSY. |
5671 | * |
5672 | * hpsa_ciss_submit will have already freed c |
5673 | * if it encountered a dma mapping failure. |
5674 | */ |
5675 | cmd->result = DID_IMM_RETRY << 16; |
5676 | scsi_done(cmd); |
5677 | } |
5678 | } |
5679 | |
/* Running in struct Scsi_Host->host_lock-less mode */
5681 | static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) |
5682 | { |
5683 | struct ctlr_info *h; |
5684 | struct hpsa_scsi_dev_t *dev; |
5685 | struct CommandList *c; |
5686 | int rc = 0; |
5687 | |
5688 | /* Get the ptr to our adapter structure out of cmd->host. */ |
	h = sdev_to_hba(cmd->device);
5690 | |
5691 | BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); |
5692 | |
5693 | dev = cmd->device->hostdata; |
5694 | if (!dev) { |
5695 | cmd->result = DID_NO_CONNECT << 16; |
5696 | scsi_done(cmd); |
5697 | return 0; |
5698 | } |
5699 | |
5700 | if (dev->removed) { |
5701 | cmd->result = DID_NO_CONNECT << 16; |
5702 | scsi_done(cmd); |
5703 | return 0; |
5704 | } |
5705 | |
5706 | if (unlikely(lockup_detected(h))) { |
5707 | cmd->result = DID_NO_CONNECT << 16; |
5708 | scsi_done(cmd); |
5709 | return 0; |
5710 | } |
5711 | |
5712 | if (dev->in_reset) |
5713 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5714 | |
	c = cmd_tagged_alloc(h, cmd);
5716 | if (c == NULL) |
5717 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5718 | |
5719 | /* |
5720 | * This is necessary because the SML doesn't zero out this field during |
5721 | * error recovery. |
5722 | */ |
5723 | cmd->result = 0; |
5724 | |
5725 | /* |
5726 | * Call alternate submit routine for I/O accelerated commands. |
5727 | * Retries always go down the normal I/O path. |
5728 | * Note: If cmd->retries is non-zero, then this is a SML |
5729 | * initiated retry and not a driver initiated retry. |
5730 | * This command has been obtained from cmd_tagged_alloc |
5731 | * and is therefore a brand-new command. |
5732 | */ |
5733 | if (likely(cmd->retries == 0 && |
5734 | !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && |
5735 | h->acciopath_status)) { |
5736 | /* Submit with the retry_pending flag unset. */ |
		rc = hpsa_ioaccel_submit(h, c, cmd, false);
5738 | if (rc == 0) |
5739 | return 0; |
5740 | if (rc == SCSI_MLQUEUE_HOST_BUSY) { |
5741 | hpsa_cmd_resolve_and_free(h, c); |
5742 | return SCSI_MLQUEUE_HOST_BUSY; |
5743 | } |
5744 | } |
5745 | return hpsa_ciss_submit(h, c, cmd, dev); |
5746 | } |
5747 | |
5748 | static void hpsa_scan_complete(struct ctlr_info *h) |
5749 | { |
5750 | unsigned long flags; |
5751 | |
5752 | spin_lock_irqsave(&h->scan_lock, flags); |
5753 | h->scan_finished = 1; |
5754 | wake_up(&h->scan_wait_queue); |
	spin_unlock_irqrestore(&h->scan_lock, flags);
5756 | } |
5757 | |
5758 | static void hpsa_scan_start(struct Scsi_Host *sh) |
5759 | { |
5760 | struct ctlr_info *h = shost_to_hba(sh); |
5761 | unsigned long flags; |
5762 | |
5763 | /* |
5764 | * Don't let rescans be initiated on a controller known to be locked |
5765 | * up. If the controller locks up *during* a rescan, that thread is |
5766 | * probably hosed, but at least we can prevent new rescan threads from |
5767 | * piling up on a locked up controller. |
5768 | */ |
5769 | if (unlikely(lockup_detected(h))) |
5770 | return hpsa_scan_complete(h); |
5771 | |
5772 | /* |
5773 | * If a scan is already waiting to run, no need to add another |
5774 | */ |
5775 | spin_lock_irqsave(&h->scan_lock, flags); |
5776 | if (h->scan_waiting) { |
		spin_unlock_irqrestore(&h->scan_lock, flags);
5778 | return; |
5779 | } |
5780 | |
	spin_unlock_irqrestore(&h->scan_lock, flags);
5782 | |
5783 | /* wait until any scan already in progress is finished. */ |
5784 | while (1) { |
5785 | spin_lock_irqsave(&h->scan_lock, flags); |
5786 | if (h->scan_finished) |
5787 | break; |
5788 | h->scan_waiting = 1; |
		spin_unlock_irqrestore(&h->scan_lock, flags);
5790 | wait_event(h->scan_wait_queue, h->scan_finished); |
5791 | /* Note: We don't need to worry about a race between this |
5792 | * thread and driver unload because the midlayer will |
5793 | * have incremented the reference count, so unload won't |
5794 | * happen if we're in here. |
5795 | */ |
5796 | } |
5797 | h->scan_finished = 0; /* mark scan as in progress */ |
5798 | h->scan_waiting = 0; |
	spin_unlock_irqrestore(&h->scan_lock, flags);
5800 | |
5801 | if (unlikely(lockup_detected(h))) |
5802 | return hpsa_scan_complete(h); |
5803 | |
5804 | /* |
5805 | * Do the scan after a reset completion |
5806 | */ |
5807 | spin_lock_irqsave(&h->reset_lock, flags); |
5808 | if (h->reset_in_progress) { |
5809 | h->drv_req_rescan = 1; |
		spin_unlock_irqrestore(&h->reset_lock, flags);
5811 | hpsa_scan_complete(h); |
5812 | return; |
5813 | } |
	spin_unlock_irqrestore(&h->reset_lock, flags);
5815 | |
5816 | hpsa_update_scsi_devices(h); |
5817 | |
5818 | hpsa_scan_complete(h); |
5819 | } |
5820 | |
5821 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) |
5822 | { |
5823 | struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; |
5824 | |
5825 | if (!logical_drive) |
5826 | return -ENODEV; |
5827 | |
5828 | if (qdepth < 1) |
5829 | qdepth = 1; |
5830 | else if (qdepth > logical_drive->queue_depth) |
5831 | qdepth = logical_drive->queue_depth; |
5832 | |
5833 | return scsi_change_queue_depth(sdev, qdepth); |
5834 | } |
5835 | |
5836 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
5837 | unsigned long elapsed_time) |
5838 | { |
5839 | struct ctlr_info *h = shost_to_hba(sh); |
5840 | unsigned long flags; |
5841 | int finished; |
5842 | |
5843 | spin_lock_irqsave(&h->scan_lock, flags); |
5844 | finished = h->scan_finished; |
	spin_unlock_irqrestore(&h->scan_lock, flags);
5846 | return finished; |
5847 | } |
5848 | |
5849 | static int hpsa_scsi_host_alloc(struct ctlr_info *h) |
5850 | { |
5851 | struct Scsi_Host *sh; |
5852 | |
5853 | sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); |
5854 | if (sh == NULL) { |
5855 | dev_err(&h->pdev->dev, "scsi_host_alloc failed\n" ); |
5856 | return -ENOMEM; |
5857 | } |
5858 | |
5859 | sh->io_port = 0; |
5860 | sh->n_io_port = 0; |
5861 | sh->this_id = -1; |
5862 | sh->max_channel = 3; |
5863 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
5864 | sh->max_lun = HPSA_MAX_LUN; |
5865 | sh->max_id = HPSA_MAX_LUN; |
5866 | sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; |
5867 | sh->cmd_per_lun = sh->can_queue; |
5868 | sh->sg_tablesize = h->maxsgentries; |
5869 | sh->transportt = hpsa_sas_transport_template; |
5870 | sh->hostdata[0] = (unsigned long) h; |
	sh->irq = pci_irq_vector(h->pdev, 0);
5872 | sh->unique_id = sh->irq; |
5873 | |
5874 | h->scsi_host = sh; |
5875 | return 0; |
5876 | } |
5877 | |
5878 | static int hpsa_scsi_add_host(struct ctlr_info *h) |
5879 | { |
5880 | int rv; |
5881 | |
	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5883 | if (rv) { |
5884 | dev_err(&h->pdev->dev, "scsi_add_host failed\n" ); |
5885 | return rv; |
5886 | } |
5887 | scsi_scan_host(h->scsi_host); |
5888 | return 0; |
5889 | } |
5890 | |
5891 | /* |
5892 | * The block layer has already gone to the trouble of picking out a unique, |
5893 | * small-integer tag for this request. We use an offset from that value as |
5894 | * an index to select our command block. (The offset allows us to reserve the |
5895 | * low-numbered entries for our own uses.) |
5896 | */ |
5897 | static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) |
5898 | { |
5899 | int idx = scsi_cmd_to_rq(scmd)->tag; |
5900 | |
5901 | if (idx < 0) |
5902 | return idx; |
5903 | |
5904 | /* Offset to leave space for internal cmds. */ |
	return idx + HPSA_NRESERVED_CMDS;
5906 | } |
5907 | |
5908 | /* |
5909 | * Send a TEST_UNIT_READY command to the specified LUN using the specified |
5910 | * reply queue; returns zero if the unit is ready, and non-zero otherwise. |
5911 | */ |
5912 | static int hpsa_send_test_unit_ready(struct ctlr_info *h, |
5913 | struct CommandList *c, unsigned char lunaddr[], |
5914 | int reply_queue) |
5915 | { |
5916 | int rc; |
5917 | |
5918 | /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ |
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
5921 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
5922 | if (rc) |
5923 | return rc; |
5924 | /* no unmap needed here because no data xfer. */ |
5925 | |
5926 | /* Check if the unit is already ready. */ |
5927 | if (c->err_info->CommandStatus == CMD_SUCCESS) |
5928 | return 0; |
5929 | |
5930 | /* |
5931 | * The first command sent after reset will receive "unit attention" to |
5932 | * indicate that the LUN has been reset...this is actually what we're |
5933 | * looking for (but, success is good too). |
5934 | */ |
5935 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && |
5936 | c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && |
5937 | (c->err_info->SenseInfo[2] == NO_SENSE || |
5938 | c->err_info->SenseInfo[2] == UNIT_ATTENTION)) |
5939 | return 0; |
5940 | |
5941 | return 1; |
5942 | } |
5943 | |
5944 | /* |
5945 | * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; |
5946 | * returns zero when the unit is ready, and non-zero when giving up. |
5947 | */ |
5948 | static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, |
5949 | struct CommandList *c, |
5950 | unsigned char lunaddr[], int reply_queue) |
5951 | { |
5952 | int rc; |
5953 | int count = 0; |
5954 | int waittime = 1; /* seconds */ |
5955 | |
5956 | /* Send test unit ready until device ready, or give up. */ |
5957 | for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { |
5958 | |
5959 | /* |
5960 | * Wait for a bit. do this first, because if we send |
5961 | * the TUR right away, the reset will just abort it. |
5962 | */ |
		msleep(1000 * waittime);
5964 | |
5965 | rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); |
5966 | if (!rc) |
5967 | break; |
5968 | |
5969 | /* Increase wait time with each try, up to a point. */ |
5970 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) |
5971 | waittime *= 2; |
5972 | |
5973 | dev_warn(&h->pdev->dev, |
5974 | "waiting %d secs for device to become ready.\n" , |
5975 | waittime); |
5976 | } |
5977 | |
5978 | return rc; |
5979 | } |
5980 | |
5981 | static int wait_for_device_to_become_ready(struct ctlr_info *h, |
5982 | unsigned char lunaddr[], |
5983 | int reply_queue) |
5984 | { |
5985 | int first_queue; |
5986 | int last_queue; |
5987 | int rq; |
5988 | int rc = 0; |
5989 | struct CommandList *c; |
5990 | |
5991 | c = cmd_alloc(h); |
5992 | |
5993 | /* |
5994 | * If no specific reply queue was requested, then send the TUR |
5995 | * repeatedly, requesting a reply on each reply queue; otherwise execute |
5996 | * the loop exactly once using only the specified queue. |
5997 | */ |
5998 | if (reply_queue == DEFAULT_REPLY_QUEUE) { |
5999 | first_queue = 0; |
6000 | last_queue = h->nreply_queues - 1; |
6001 | } else { |
6002 | first_queue = reply_queue; |
6003 | last_queue = reply_queue; |
6004 | } |
6005 | |
6006 | for (rq = first_queue; rq <= last_queue; rq++) { |
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
6008 | if (rc) |
6009 | break; |
6010 | } |
6011 | |
6012 | if (rc) |
6013 | dev_warn(&h->pdev->dev, "giving up on device.\n" ); |
6014 | else |
6015 | dev_warn(&h->pdev->dev, "device is ready.\n" ); |
6016 | |
6017 | cmd_free(h, c); |
6018 | return rc; |
6019 | } |
6020 | |
6021 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
6022 | * complaining. Doing a host- or bus-reset can't do anything good here. |
6023 | */ |
6024 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) |
6025 | { |
6026 | int rc = SUCCESS; |
6027 | int i; |
6028 | struct ctlr_info *h; |
6029 | struct hpsa_scsi_dev_t *dev = NULL; |
6030 | u8 reset_type; |
6031 | char msg[48]; |
6032 | unsigned long flags; |
6033 | |
6034 | /* find the controller to which the command to be aborted was sent */ |
	h = sdev_to_hba(scsicmd->device);
6036 | if (h == NULL) /* paranoia */ |
6037 | return FAILED; |
6038 | |
6039 | spin_lock_irqsave(&h->reset_lock, flags); |
6040 | h->reset_in_progress = 1; |
	spin_unlock_irqrestore(&h->reset_lock, flags);
6042 | |
6043 | if (lockup_detected(h)) { |
6044 | rc = FAILED; |
6045 | goto return_reset_status; |
6046 | } |
6047 | |
6048 | dev = scsicmd->device->hostdata; |
6049 | if (!dev) { |
6050 | dev_err(&h->pdev->dev, "%s: device lookup failed\n" , __func__); |
6051 | rc = FAILED; |
6052 | goto return_reset_status; |
6053 | } |
6054 | |
6055 | if (dev->devtype == TYPE_ENCLOSURE) { |
6056 | rc = SUCCESS; |
6057 | goto return_reset_status; |
6058 | } |
6059 | |
6060 | /* if controller locked up, we can guarantee command won't complete */ |
6061 | if (lockup_detected(h)) { |
		snprintf(msg, sizeof(msg),
			"cmd %d RESET FAILED, lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6066 | rc = FAILED; |
6067 | goto return_reset_status; |
6068 | } |
6069 | |
6070 | /* this reset request might be the result of a lockup; check */ |
6071 | if (detect_controller_lockup(h)) { |
		snprintf(msg, sizeof(msg),
			"cmd %d RESET FAILED, new lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6076 | rc = FAILED; |
6077 | goto return_reset_status; |
6078 | } |
6079 | |
6080 | /* Do not attempt on controller */ |
	if (is_hba_lunid(dev->scsi3addr)) {
6082 | rc = SUCCESS; |
6083 | goto return_reset_status; |
6084 | } |
6085 | |
	if (is_logical_dev_addr_mode(dev->scsi3addr))
6087 | reset_type = HPSA_DEVICE_RESET_MSG; |
6088 | else |
6089 | reset_type = HPSA_PHYS_TARGET_RESET; |
6090 | |
6091 | sprintf(buf: msg, fmt: "resetting %s" , |
6092 | reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical " ); |
6093 | hpsa_show_dev_msg(KERN_WARNING, h, dev, description: msg); |
6094 | |
6095 | /* |
6096 | * wait to see if any commands will complete before sending reset |
6097 | */ |
6098 | dev->in_reset = true; /* block any new cmds from OS for this device */ |
6099 | for (i = 0; i < 10; i++) { |
		if (atomic_read(&dev->commands_outstanding) > 0)
			msleep(1000);
6102 | else |
6103 | break; |
6104 | } |
6105 | |
6106 | /* send a reset to the SCSI LUN which the command was sent to */ |
6107 | rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); |
6108 | if (rc == 0) |
6109 | rc = SUCCESS; |
6110 | else |
6111 | rc = FAILED; |
6112 | |
6113 | sprintf(buf: msg, fmt: "reset %s %s" , |
6114 | reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical " , |
6115 | rc == SUCCESS ? "completed successfully" : "failed" ); |
6116 | hpsa_show_dev_msg(KERN_WARNING, h, dev, description: msg); |
6117 | |
6118 | return_reset_status: |
6119 | spin_lock_irqsave(&h->reset_lock, flags); |
6120 | h->reset_in_progress = 0; |
6121 | if (dev) |
6122 | dev->in_reset = false; |
	spin_unlock_irqrestore(&h->reset_lock, flags);
6124 | return rc; |
6125 | } |
6126 | |
6127 | /* |
6128 | * For operations with an associated SCSI command, a command block is allocated |
6129 | * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the |
6130 | * block request tag as an index into a table of entries. cmd_tagged_free() is |
6131 | * the complement, although cmd_free() may be called instead. |
6132 | * This function is only called for new requests from queue_command. |
6133 | */ |
6134 | static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, |
6135 | struct scsi_cmnd *scmd) |
6136 | { |
6137 | int idx = hpsa_get_cmd_index(scmd); |
6138 | struct CommandList *c = h->cmd_pool + idx; |
6139 | |
6140 | if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { |
6141 | dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n" , |
6142 | idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); |
6143 | /* The index value comes from the block layer, so if it's out of |
6144 | * bounds, it's probably not our bug. |
6145 | */ |
6146 | BUG(); |
6147 | } |
6148 | |
6149 | if (unlikely(!hpsa_is_cmd_idle(c))) { |
6150 | /* |
6151 | * We expect that the SCSI layer will hand us a unique tag |
6152 | * value. Thus, there should never be a collision here between |
6153 | * two requests...because if the selected command isn't idle |
6154 | * then someone is going to be very disappointed. |
6155 | */ |
6156 | if (idx != h->last_collision_tag) { /* Print once per tag */ |
6157 | dev_warn(&h->pdev->dev, |
6158 | "%s: tag collision (tag=%d)\n" , __func__, idx); |
6159 | if (scmd) |
6160 | scsi_print_command(scmd); |
6161 | h->last_collision_tag = idx; |
6162 | } |
6163 | return NULL; |
6164 | } |
6165 | |
	atomic_inc(&c->refcount);
	hpsa_cmd_partial_init(h, idx, c);
6168 | |
6169 | /* |
6170 | * This is a new command obtained from queue_command so |
6171 | * there have not been any driver initiated retry attempts. |
6172 | */ |
6173 | c->retry_pending = false; |
6174 | |
6175 | return c; |
6176 | } |
6177 | |
6178 | static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) |
6179 | { |
6180 | /* |
6181 | * Release our reference to the block. We don't need to do anything |
6182 | * else to free it, because it is accessed by index. |
6183 | */ |
	(void)atomic_dec(&c->refcount);
6185 | } |
6186 | |
6187 | /* |
6188 | * For operations that cannot sleep, a command block is allocated at init, |
6189 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track |
6190 | * which ones are free or in use. Lock must be held when calling this. |
6191 | * cmd_free() is the complement. |
 * This function never gives up and never returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
6194 | */ |
6195 | |
6196 | static struct CommandList *cmd_alloc(struct ctlr_info *h) |
6197 | { |
6198 | struct CommandList *c; |
6199 | int refcount, i; |
6200 | int offset = 0; |
6201 | |
6202 | /* |
 * There is some *extremely* small but non-zero chance that
6204 | * multiple threads could get in here, and one thread could |
6205 | * be scanning through the list of bits looking for a free |
6206 | * one, but the free ones are always behind him, and other |
6207 | * threads sneak in behind him and eat them before he can |
6208 | * get to them, so that while there is always a free one, a |
6209 | * very unlucky thread might be starved anyway, never able to |
6210 | * beat the other threads. In reality, this happens so |
6211 | * infrequently as to be indistinguishable from never. |
6212 | * |
6213 | * Note that we start allocating commands before the SCSI host structure |
6214 | * is initialized. Since the search starts at bit zero, this |
6215 | * all works, since we have at least one command structure available; |
6216 | * however, it means that the structures with the low indexes have to be |
6217 | * reserved for driver-initiated requests, while requests from the block |
6218 | * layer will use the higher indexes. |
6219 | */ |
6220 | |
6221 | for (;;) { |
		i = find_next_zero_bit(h->cmd_pool_bits,
6223 | HPSA_NRESERVED_CMDS, |
6224 | offset); |
6225 | if (unlikely(i >= HPSA_NRESERVED_CMDS)) { |
6226 | offset = 0; |
6227 | continue; |
6228 | } |
6229 | c = h->cmd_pool + i; |
		refcount = atomic_inc_return(&c->refcount);
6231 | if (unlikely(refcount > 1)) { |
6232 | cmd_free(h, c); /* already in use */ |
6233 | offset = (i + 1) % HPSA_NRESERVED_CMDS; |
6234 | continue; |
6235 | } |
		set_bit(i, h->cmd_pool_bits);
6237 | break; /* it's ours now. */ |
6238 | } |
	hpsa_cmd_partial_init(h, i, c);
6240 | c->device = NULL; |
6241 | |
6242 | /* |
6243 | * cmd_alloc is for "internal" commands and they are never |
6244 | * retried. |
6245 | */ |
6246 | c->retry_pending = false; |
6247 | |
6248 | return c; |
6249 | } |
6250 | |
6251 | /* |
6252 | * This is the complementary operation to cmd_alloc(). Note, however, in some |
6253 | * corner cases it may also be used to free blocks allocated by |
6254 | * cmd_tagged_alloc() in which case the ref-count decrement does the trick and |
6255 | * the clear-bit is harmless. |
6256 | */ |
6257 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) |
6258 | { |
6259 | if (atomic_dec_and_test(v: &c->refcount)) { |
6260 | int i; |
6261 | |
6262 | i = c - h->cmd_pool; |
		clear_bit(i, h->cmd_pool_bits);
6264 | } |
6265 | } |
6266 | |
6267 | #ifdef CONFIG_COMPAT |
6268 | |
6269 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, |
6270 | void __user *arg) |
6271 | { |
	struct ctlr_info *h = sdev_to_hba(dev);
6273 | IOCTL32_Command_struct __user *arg32 = arg; |
6274 | IOCTL_Command_struct arg64; |
6275 | int err; |
6276 | u32 cp; |
6277 | |
6278 | if (!arg) |
6279 | return -EINVAL; |
6280 | |
6281 | memset(&arg64, 0, sizeof(arg64)); |
	if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
6283 | return -EFAULT; |
6284 | if (get_user(cp, &arg32->buf)) |
6285 | return -EFAULT; |
	arg64.buf = compat_ptr(cp);
6287 | |
	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
		return -EAGAIN;
	err = hpsa_passthru_ioctl(h, &arg64);
	atomic_inc(&h->passthru_cmds_avail);
	if (err)
		return err;
	if (copy_to_user(&arg32->error_info, &arg64.error_info,
			sizeof(arg32->error_info)))
6296 | return -EFAULT; |
6297 | return 0; |
6298 | } |
6299 | |
6300 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, |
6301 | unsigned int cmd, void __user *arg) |
6302 | { |
6303 | struct ctlr_info *h = sdev_to_hba(dev); |
6304 | BIG_IOCTL32_Command_struct __user *arg32 = arg; |
6305 | BIG_IOCTL_Command_struct arg64; |
6306 | int err; |
6307 | u32 cp; |
6308 | |
6309 | if (!arg) |
6310 | return -EINVAL; |
6311 | memset(&arg64, 0, sizeof(arg64)); |
6312 | if (copy_from_user(&arg64, arg32, |
6313 | offsetof(BIG_IOCTL32_Command_struct, buf))) |
6314 | return -EFAULT; |
6315 | if (get_user(cp, &arg32->buf)) |
6316 | return -EFAULT; |
6317 | arg64.buf = compat_ptr(cp); |
6318 | |
6319 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
6320 | return -EAGAIN; |
6321 | err = hpsa_big_passthru_ioctl(h, &arg64); |
6322 | atomic_inc(&h->passthru_cmds_avail); |
6323 | if (err) |
6324 | return err; |
6325 | if (copy_to_user(&arg32->error_info, &arg64.error_info, |
6326 | sizeof(arg32->error_info))) |
6327 | return -EFAULT; |
6328 | return 0; |
6329 | } |
6330 | |
6331 | static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, |
6332 | void __user *arg) |
6333 | { |
6334 | switch (cmd) { |
6335 | case CCISS_GETPCIINFO: |
6336 | case CCISS_GETINTINFO: |
6337 | case CCISS_SETINTINFO: |
6338 | case CCISS_GETNODENAME: |
6339 | case CCISS_SETNODENAME: |
6340 | case CCISS_GETHEARTBEAT: |
6341 | case CCISS_GETBUSTYPES: |
6342 | case CCISS_GETFIRMVER: |
6343 | case CCISS_GETDRIVVER: |
6344 | case CCISS_REVALIDVOLS: |
6345 | case CCISS_DEREGDISK: |
6346 | case CCISS_REGNEWDISK: |
6347 | case CCISS_REGNEWD: |
6348 | case CCISS_RESCANDISK: |
6349 | case CCISS_GETLUNINFO: |
6350 | return hpsa_ioctl(dev, cmd, arg); |
6351 | |
6352 | case CCISS_PASSTHRU32: |
6353 | return hpsa_ioctl32_passthru(dev, cmd, arg); |
6354 | case CCISS_BIG_PASSTHRU32: |
6355 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); |
6356 | |
6357 | default: |
6358 | return -ENOIOCTLCMD; |
6359 | } |
6360 | } |
6361 | #endif |
6362 | |
6363 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) |
6364 | { |
6365 | struct hpsa_pci_info pciinfo; |
6366 | |
6367 | if (!argp) |
6368 | return -EINVAL; |
6369 | pciinfo.domain = pci_domain_nr(h->pdev->bus); |
6370 | pciinfo.bus = h->pdev->bus->number; |
6371 | pciinfo.dev_fn = h->pdev->devfn; |
6372 | pciinfo.board_id = h->board_id; |
6373 | if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) |
6374 | return -EFAULT; |
6375 | return 0; |
6376 | } |
6377 | |
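/*
 * Pack the driver version into the DriverVer_type expected by the
 * CCISS_GETDRIVVER ioctl: one byte each for major, minor, and
 * subminor.  As an illustrative example (assuming a version string of
 * "3.4.20"), the packed value would be:
 *
 *     (3 << 16) | (4 << 8) | 20 == 0x00030414
 *
 * Any trailing "-NNN" build suffix is ignored by the sscanf() below.
 */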
6378 | static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) |
6379 | { |
6380 | DriverVer_type DriverVer; |
6381 | unsigned char vmaj, vmin, vsubmin; |
6382 | int rc; |
6383 | |
6384 | rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", |
6385 | &vmaj, &vmin, &vsubmin); |
6386 | if (rc != 3) { |
6387 | dev_info(&h->pdev->dev, "driver version string '%s' " |
6388 | "unrecognized.", HPSA_DRIVER_VERSION); |
6389 | vmaj = 0; |
6390 | vmin = 0; |
6391 | vsubmin = 0; |
6392 | } |
6393 | DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; |
6394 | if (!argp) |
6395 | return -EINVAL; |
6396 | if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) |
6397 | return -EFAULT; |
6398 | return 0; |
6399 | } |
6400 | |
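/*
 * Service a CCISS_PASSTHRU request: stage the user buffer (if any) in
 * a kernel bounce buffer, describe it with a single scatter-gather
 * entry, issue the command synchronously, then copy the error
 * information (and, for reads, the data) back to the caller.
 */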
6401 | static int hpsa_passthru_ioctl(struct ctlr_info *h, |
6402 | IOCTL_Command_struct *iocommand) |
6403 | { |
6404 | struct CommandList *c; |
6405 | char *buff = NULL; |
6406 | u64 temp64; |
6407 | int rc = 0; |
6408 | |
6409 | if (!capable(CAP_SYS_RAWIO)) |
6410 | return -EPERM; |
6411 | if ((iocommand->buf_size < 1) && |
6412 | (iocommand->Request.Type.Direction != XFER_NONE)) { |
6413 | return -EINVAL; |
6414 | } |
6415 | if (iocommand->buf_size > 0) { |
6416 | buff = kmalloc(iocommand->buf_size, GFP_KERNEL); |
6417 | if (buff == NULL) |
6418 | return -ENOMEM; |
6419 | if (iocommand->Request.Type.Direction & XFER_WRITE) { |
6420 | /* Copy the data into the buffer we created */ |
6421 | if (copy_from_user(buff, iocommand->buf, |
6422 | iocommand->buf_size)) { |
6423 | rc = -EFAULT; |
6424 | goto out_kfree; |
6425 | } |
6426 | } else { |
6427 | memset(buff, 0, iocommand->buf_size); |
6428 | } |
6429 | } |
6430 | c = cmd_alloc(h); |
6431 | |
6432 | /* Fill in the command type */ |
6433 | c->cmd_type = CMD_IOCTL_PEND; |
6434 | c->scsi_cmd = SCSI_CMD_BUSY; |
6435 | /* Fill in Command Header */ |
6436 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
6437 | if (iocommand->buf_size > 0) { /* buffer to fill */ |
6438 | c->Header.SGList = 1; |
6439 | c->Header.SGTotal = cpu_to_le16(1); |
6440 | } else { /* no buffers to fill */ |
6441 | c->Header.SGList = 0; |
6442 | c->Header.SGTotal = cpu_to_le16(0); |
6443 | } |
6444 | memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); |
6445 | |
6446 | /* Fill in Request block */ |
6447 | memcpy(&c->Request, &iocommand->Request, |
6448 | sizeof(c->Request)); |
6449 | |
6450 | /* Fill in the scatter gather information */ |
6451 | if (iocommand->buf_size > 0) { |
6452 | temp64 = dma_map_single(&h->pdev->dev, buff, |
6453 | iocommand->buf_size, DMA_BIDIRECTIONAL); |
6454 | if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { |
6455 | c->SG[0].Addr = cpu_to_le64(0); |
6456 | c->SG[0].Len = cpu_to_le32(0); |
6457 | rc = -ENOMEM; |
6458 | goto out; |
6459 | } |
6460 | c->SG[0].Addr = cpu_to_le64(temp64); |
6461 | c->SG[0].Len = cpu_to_le32(iocommand->buf_size); |
6462 | c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ |
6463 | } |
6464 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
6465 | NO_TIMEOUT); |
6466 | if (iocommand->buf_size > 0) |
6467 | hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); |
6468 | check_ioctl_unit_attention(h, c); |
6469 | if (rc) { |
6470 | rc = -EIO; |
6471 | goto out; |
6472 | } |
6473 | |
6474 | /* Copy the error information out */ |
6475 | memcpy(&iocommand->error_info, c->err_info, |
6476 | sizeof(iocommand->error_info)); |
6477 | if ((iocommand->Request.Type.Direction & XFER_READ) && |
6478 | iocommand->buf_size > 0) { |
6479 | /* Copy the data out of the buffer we created */ |
6480 | if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { |
6481 | rc = -EFAULT; |
6482 | goto out; |
6483 | } |
6484 | } |
6485 | out: |
6486 | cmd_free(h, c); |
6487 | out_kfree: |
6488 | kfree(buff); |
6489 | return rc; |
6490 | } |
6491 | |
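/*
 * Service a CCISS_BIG_PASSTHRU request.  The user buffer is split into
 * chunks of at most ioc->malloc_size bytes, one kernel buffer (and one
 * scatter-gather entry) per chunk, up to SG_ENTRIES_IN_CMD chunks.
 * For example (purely illustrative), a 192 KiB buffer with a 64 KiB
 * malloc_size would be staged through three chunks/SG entries.
 */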
6492 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, |
6493 | BIG_IOCTL_Command_struct *ioc) |
6494 | { |
6495 | struct CommandList *c; |
6496 | unsigned char **buff = NULL; |
6497 | int *buff_size = NULL; |
6498 | u64 temp64; |
6499 | BYTE sg_used = 0; |
6500 | int status = 0; |
6501 | u32 left; |
6502 | u32 sz; |
6503 | BYTE __user *data_ptr; |
6504 | |
6505 | if (!capable(CAP_SYS_RAWIO)) |
6506 | return -EPERM; |
6507 | |
6508 | if ((ioc->buf_size < 1) && |
6509 | (ioc->Request.Type.Direction != XFER_NONE)) |
6510 | return -EINVAL; |
6511 | /* Check kmalloc limits using all SGs */ |
6512 | if (ioc->malloc_size > MAX_KMALLOC_SIZE) |
6513 | return -EINVAL; |
6514 | if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) |
6515 | return -EINVAL; |
6516 | buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); |
6517 | if (!buff) { |
6518 | status = -ENOMEM; |
6519 | goto cleanup1; |
6520 | } |
6521 | buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL); |
6522 | if (!buff_size) { |
6523 | status = -ENOMEM; |
6524 | goto cleanup1; |
6525 | } |
6526 | left = ioc->buf_size; |
6527 | data_ptr = ioc->buf; |
6528 | while (left) { |
6529 | sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; |
6530 | buff_size[sg_used] = sz; |
6531 | buff[sg_used] = kmalloc(sz, GFP_KERNEL); |
6532 | if (buff[sg_used] == NULL) { |
6533 | status = -ENOMEM; |
6534 | goto cleanup1; |
6535 | } |
6536 | if (ioc->Request.Type.Direction & XFER_WRITE) { |
6537 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { |
6538 | status = -EFAULT; |
6539 | goto cleanup1; |
6540 | } |
6541 | } else |
6542 | memset(buff[sg_used], 0, sz); |
6543 | left -= sz; |
6544 | data_ptr += sz; |
6545 | sg_used++; |
6546 | } |
6547 | c = cmd_alloc(h); |
6548 | |
6549 | c->cmd_type = CMD_IOCTL_PEND; |
6550 | c->scsi_cmd = SCSI_CMD_BUSY; |
6551 | c->Header.ReplyQueue = 0; |
6552 | c->Header.SGList = (u8) sg_used; |
6553 | c->Header.SGTotal = cpu_to_le16(sg_used); |
6554 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
6555 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); |
6556 | if (ioc->buf_size > 0) { |
6557 | int i; |
6558 | for (i = 0; i < sg_used; i++) { |
6559 | temp64 = dma_map_single(&h->pdev->dev, buff[i], |
6560 | buff_size[i], DMA_BIDIRECTIONAL); |
6561 | if (dma_mapping_error(&h->pdev->dev, |
6562 | (dma_addr_t) temp64)) { |
6563 | c->SG[i].Addr = cpu_to_le64(0); |
6564 | c->SG[i].Len = cpu_to_le32(0); |
6565 | hpsa_pci_unmap(h->pdev, c, i, |
6566 | DMA_BIDIRECTIONAL); |
6567 | status = -ENOMEM; |
6568 | goto cleanup0; |
6569 | } |
6570 | c->SG[i].Addr = cpu_to_le64(temp64); |
6571 | c->SG[i].Len = cpu_to_le32(buff_size[i]); |
6572 | c->SG[i].Ext = cpu_to_le32(0); |
6573 | } |
6574 | c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); |
6575 | } |
6576 | status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
6577 | NO_TIMEOUT); |
6578 | if (sg_used) |
6579 | hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); |
6580 | check_ioctl_unit_attention(h, c); |
6581 | if (status) { |
6582 | status = -EIO; |
6583 | goto cleanup0; |
6584 | } |
6585 | |
6586 | /* Copy the error information out */ |
6587 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); |
6588 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { |
6589 | int i; |
6590 | |
6591 | /* Copy the data out of the buffer we created */ |
6592 | BYTE __user *ptr = ioc->buf; |
6593 | for (i = 0; i < sg_used; i++) { |
6594 | if (copy_to_user(ptr, buff[i], buff_size[i])) { |
6595 | status = -EFAULT; |
6596 | goto cleanup0; |
6597 | } |
6598 | ptr += buff_size[i]; |
6599 | } |
6600 | } |
6601 | status = 0; |
6602 | cleanup0: |
6603 | cmd_free(h, c); |
6604 | cleanup1: |
6605 | if (buff) { |
6606 | int i; |
6607 | |
6608 | for (i = 0; i < sg_used; i++) |
6609 | kfree(buff[i]); |
6610 | kfree(buff); |
6611 | } |
6612 | kfree(buff_size); |
6613 | return status; |
6614 | } |
6615 | |
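/*
 * Commands issued through the ioctl paths do not go through the normal
 * SCSI completion handling, so give the common unit-attention checker a
 * chance to note controller events here; its return value is
 * deliberately ignored, since it does not affect the ioctl result.
 */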
6616 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
6617 | struct CommandList *c) |
6618 | { |
6619 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && |
6620 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) |
6621 | (void) check_for_unit_attention(h, c); |
6622 | } |
6623 | |
6624 | /* |
6625 | * ioctl |
6626 | */ |
6627 | static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, |
6628 | void __user *argp) |
6629 | { |
6630 | struct ctlr_info *h = sdev_to_hba(dev); |
6631 | int rc; |
6632 | |
6633 | switch (cmd) { |
6634 | case CCISS_DEREGDISK: |
6635 | case CCISS_REGNEWDISK: |
6636 | case CCISS_REGNEWD: |
6637 | hpsa_scan_start(h->scsi_host); |
6638 | return 0; |
6639 | case CCISS_GETPCIINFO: |
6640 | return hpsa_getpciinfo_ioctl(h, argp); |
6641 | case CCISS_GETDRIVVER: |
6642 | return hpsa_getdrivver_ioctl(h, argp); |
6643 | case CCISS_PASSTHRU: { |
6644 | IOCTL_Command_struct iocommand; |
6645 | |
6646 | if (!argp) |
6647 | return -EINVAL; |
6648 | if (copy_from_user(&iocommand, argp, sizeof(iocommand))) |
6649 | return -EFAULT; |
6650 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
6651 | return -EAGAIN; |
6652 | rc = hpsa_passthru_ioctl(h, &iocommand); |
6653 | atomic_inc(&h->passthru_cmds_avail); |
6654 | if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) |
6655 | rc = -EFAULT; |
6656 | return rc; |
6657 | } |
6658 | case CCISS_BIG_PASSTHRU: { |
6659 | BIG_IOCTL_Command_struct ioc; |
6660 | if (!argp) |
6661 | return -EINVAL; |
6662 | if (copy_from_user(&ioc, argp, sizeof(ioc))) |
6663 | return -EFAULT; |
6664 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
6665 | return -EAGAIN; |
6666 | rc = hpsa_big_passthru_ioctl(h, &ioc); |
6667 | atomic_inc(&h->passthru_cmds_avail); |
6668 | if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) |
6669 | rc = -EFAULT; |
6670 | return rc; |
6671 | } |
6672 | default: |
6673 | return -ENOTTY; |
6674 | } |
6675 | } |
6676 | |
6677 | static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) |
6678 | { |
6679 | struct CommandList *c; |
6680 | |
6681 | c = cmd_alloc(h); |
6682 | |
6683 | /* fill_cmd can't fail here, no data buffer to map */ |
6684 | (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, |
6685 | RAID_CTLR_LUNID, TYPE_MSG); |
6686 | c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ |
6687 | c->waiting = NULL; |
6688 | enqueue_cmd_and_start_io(h, c); |
6689 | /* Don't wait for completion, the reset won't complete. Don't free |
6690 | * the command either. This is the last command we will send before |
6691 | * re-initializing everything, so it doesn't matter and won't leak. |
6692 | */ |
6693 | return; |
6694 | } |
6695 | |
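/*
 * Build a driver-internal command in 'c'.  For cmd_type == TYPE_CMD
 * this fills in the CDB for the requested CISS/BMIC opcode; for
 * TYPE_MSG it builds a reset message.  On success the data buffer (if
 * any) has been DMA-mapped with a direction derived from the request's
 * transfer attributes; returns -1 if that mapping fails, 0 otherwise.
 */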
6696 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
6697 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
6698 | int cmd_type) |
6699 | { |
6700 | enum dma_data_direction dir = DMA_NONE; |
6701 | |
6702 | c->cmd_type = CMD_IOCTL_PEND; |
6703 | c->scsi_cmd = SCSI_CMD_BUSY; |
6704 | c->Header.ReplyQueue = 0; |
6705 | if (buff != NULL && size > 0) { |
6706 | c->Header.SGList = 1; |
6707 | c->Header.SGTotal = cpu_to_le16(1); |
6708 | } else { |
6709 | c->Header.SGList = 0; |
6710 | c->Header.SGTotal = cpu_to_le16(0); |
6711 | } |
6712 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); |
6713 | |
6714 | if (cmd_type == TYPE_CMD) { |
6715 | switch (cmd) { |
6716 | case HPSA_INQUIRY: |
6717 | /* are we trying to read a vital product page */ |
6718 | if (page_code & VPD_PAGE) { |
6719 | c->Request.CDB[1] = 0x01; |
6720 | c->Request.CDB[2] = (page_code & 0xff); |
6721 | } |
6722 | c->Request.CDBLen = 6; |
6723 | c->Request.type_attr_dir = |
6724 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6725 | c->Request.Timeout = 0; |
6726 | c->Request.CDB[0] = HPSA_INQUIRY; |
6727 | c->Request.CDB[4] = size & 0xFF; |
6728 | break; |
6729 | case RECEIVE_DIAGNOSTIC: |
6730 | c->Request.CDBLen = 6; |
6731 | c->Request.type_attr_dir = |
6732 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6733 | c->Request.Timeout = 0; |
6734 | c->Request.CDB[0] = cmd; |
6735 | c->Request.CDB[1] = 1; |
6736 | c->Request.CDB[2] = 1; |
6737 | c->Request.CDB[3] = (size >> 8) & 0xFF; |
6738 | c->Request.CDB[4] = size & 0xFF; |
6739 | break; |
6740 | case HPSA_REPORT_LOG: |
6741 | case HPSA_REPORT_PHYS: |
6742 | /* Talking to the controller, so it's a physical command: |
6743 | mode = 00, target = 0. Nothing to write. |
6744 | */ |
6745 | c->Request.CDBLen = 12; |
6746 | c->Request.type_attr_dir = |
6747 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6748 | c->Request.Timeout = 0; |
6749 | c->Request.CDB[0] = cmd; |
6750 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ |
6751 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6752 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6753 | c->Request.CDB[9] = size & 0xFF; |
6754 | break; |
6755 | case BMIC_SENSE_DIAG_OPTIONS: |
6756 | c->Request.CDBLen = 16; |
6757 | c->Request.type_attr_dir = |
6758 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6759 | c->Request.Timeout = 0; |
6760 | /* Spec says this should be BMIC_WRITE */ |
6761 | c->Request.CDB[0] = BMIC_READ; |
6762 | c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; |
6763 | break; |
6764 | case BMIC_SET_DIAG_OPTIONS: |
6765 | c->Request.CDBLen = 16; |
6766 | c->Request.type_attr_dir = |
6767 | TYPE_ATTR_DIR(cmd_type, |
6768 | ATTR_SIMPLE, XFER_WRITE); |
6769 | c->Request.Timeout = 0; |
6770 | c->Request.CDB[0] = BMIC_WRITE; |
6771 | c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; |
6772 | break; |
6773 | case HPSA_CACHE_FLUSH: |
6774 | c->Request.CDBLen = 12; |
6775 | c->Request.type_attr_dir = |
6776 | TYPE_ATTR_DIR(cmd_type, |
6777 | ATTR_SIMPLE, XFER_WRITE); |
6778 | c->Request.Timeout = 0; |
6779 | c->Request.CDB[0] = BMIC_WRITE; |
6780 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; |
6781 | c->Request.CDB[7] = (size >> 8) & 0xFF; |
6782 | c->Request.CDB[8] = size & 0xFF; |
6783 | break; |
6784 | case TEST_UNIT_READY: |
6785 | c->Request.CDBLen = 6; |
6786 | c->Request.type_attr_dir = |
6787 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); |
6788 | c->Request.Timeout = 0; |
6789 | break; |
6790 | case HPSA_GET_RAID_MAP: |
6791 | c->Request.CDBLen = 12; |
6792 | c->Request.type_attr_dir = |
6793 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6794 | c->Request.Timeout = 0; |
6795 | c->Request.CDB[0] = HPSA_CISS_READ; |
6796 | c->Request.CDB[1] = cmd; |
6797 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ |
6798 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6799 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6800 | c->Request.CDB[9] = size & 0xFF; |
6801 | break; |
6802 | case BMIC_SENSE_CONTROLLER_PARAMETERS: |
6803 | c->Request.CDBLen = 10; |
6804 | c->Request.type_attr_dir = |
6805 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6806 | c->Request.Timeout = 0; |
6807 | c->Request.CDB[0] = BMIC_READ; |
6808 | c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; |
6809 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6810 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6811 | break; |
6812 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: |
6813 | c->Request.CDBLen = 10; |
6814 | c->Request.type_attr_dir = |
6815 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6816 | c->Request.Timeout = 0; |
6817 | c->Request.CDB[0] = BMIC_READ; |
6818 | c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; |
6819 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6820 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6821 | break; |
6822 | case BMIC_SENSE_SUBSYSTEM_INFORMATION: |
6823 | c->Request.CDBLen = 10; |
6824 | c->Request.type_attr_dir = |
6825 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6826 | c->Request.Timeout = 0; |
6827 | c->Request.CDB[0] = BMIC_READ; |
6828 | c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; |
6829 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6830 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6831 | break; |
6832 | case BMIC_SENSE_STORAGE_BOX_PARAMS: |
6833 | c->Request.CDBLen = 10; |
6834 | c->Request.type_attr_dir = |
6835 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6836 | c->Request.Timeout = 0; |
6837 | c->Request.CDB[0] = BMIC_READ; |
6838 | c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; |
6839 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6840 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6841 | break; |
6842 | case BMIC_IDENTIFY_CONTROLLER: |
6843 | c->Request.CDBLen = 10; |
6844 | c->Request.type_attr_dir = |
6845 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); |
6846 | c->Request.Timeout = 0; |
6847 | c->Request.CDB[0] = BMIC_READ; |
6848 | c->Request.CDB[1] = 0; |
6849 | c->Request.CDB[2] = 0; |
6850 | c->Request.CDB[3] = 0; |
6851 | c->Request.CDB[4] = 0; |
6852 | c->Request.CDB[5] = 0; |
6853 | c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; |
6854 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
6855 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
6856 | c->Request.CDB[9] = 0; |
6857 | break; |
6858 | default: |
6859 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n" , cmd); |
6860 | BUG(); |
6861 | } |
6862 | } else if (cmd_type == TYPE_MSG) { |
6863 | switch (cmd) { |
6864 | |
6865 | case HPSA_PHYS_TARGET_RESET: |
6866 | c->Request.CDBLen = 16; |
6867 | c->Request.type_attr_dir = |
6868 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); |
6869 | c->Request.Timeout = 0; /* Don't time out */ |
6870 | memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); |
6871 | c->Request.CDB[0] = HPSA_RESET; |
6872 | c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; |
6873 | /* Physical target reset needs no control bytes 4-7*/ |
6874 | c->Request.CDB[4] = 0x00; |
6875 | c->Request.CDB[5] = 0x00; |
6876 | c->Request.CDB[6] = 0x00; |
6877 | c->Request.CDB[7] = 0x00; |
6878 | break; |
6879 | case HPSA_DEVICE_RESET_MSG: |
6880 | c->Request.CDBLen = 16; |
6881 | c->Request.type_attr_dir = |
6882 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); |
6883 | c->Request.Timeout = 0; /* Don't time out */ |
6884 | memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); |
6885 | c->Request.CDB[0] = cmd; |
6886 | c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; |
6887 | /* If bytes 4-7 are zero, it means reset the */ |
6888 | /* LunID device */ |
6889 | c->Request.CDB[4] = 0x00; |
6890 | c->Request.CDB[5] = 0x00; |
6891 | c->Request.CDB[6] = 0x00; |
6892 | c->Request.CDB[7] = 0x00; |
6893 | break; |
6894 | default: |
6895 | dev_warn(&h->pdev->dev, "unknown message type %d\n" , |
6896 | cmd); |
6897 | BUG(); |
6898 | } |
6899 | } else { |
6900 | dev_warn(&h->pdev->dev, "unknown command type %d\n" , cmd_type); |
6901 | BUG(); |
6902 | } |
6903 | |
6904 | switch (GET_DIR(c->Request.type_attr_dir)) { |
6905 | case XFER_READ: |
6906 | dir = DMA_FROM_DEVICE; |
6907 | break; |
6908 | case XFER_WRITE: |
6909 | dir = DMA_TO_DEVICE; |
6910 | break; |
6911 | case XFER_NONE: |
6912 | dir = DMA_NONE; |
6913 | break; |
6914 | default: |
6915 | dir = DMA_BIDIRECTIONAL; |
6916 | } |
6917 | if (hpsa_map_one(h->pdev, c, buff, size, dir)) |
6918 | return -1; |
6919 | return 0; |
6920 | } |
6921 | |
6922 | /* |
6923 | * Map (physical) PCI mem into (virtual) kernel space |
6924 | */ |
6925 | static void __iomem *remap_pci_mem(ulong base, ulong size) |
6926 | { |
6927 | ulong page_base = ((ulong) base) & PAGE_MASK; |
6928 | ulong page_offs = ((ulong) base) - page_base; |
6929 | void __iomem *page_remapped = ioremap(page_base, |
6930 | page_offs + size); |
6931 | |
6932 | return page_remapped ? (page_remapped + page_offs) : NULL; |
6933 | } |
6934 | |
6935 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) |
6936 | { |
6937 | return h->access.command_completed(h, q); |
6938 | } |
6939 | |
6940 | static inline bool interrupt_pending(struct ctlr_info *h) |
6941 | { |
6942 | return h->access.intr_pending(h); |
6943 | } |
6944 | |
6945 | static inline long interrupt_not_for_us(struct ctlr_info *h) |
6946 | { |
6947 | return (h->access.intr_pending(h) == 0) || |
6948 | (h->interrupts_enabled == 0); |
6949 | } |
6950 | |
6951 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
6952 | u32 raw_tag) |
6953 | { |
6954 | if (unlikely(tag_index >= h->nr_cmds)) { |
6955 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n" , raw_tag); |
6956 | return 1; |
6957 | } |
6958 | return 0; |
6959 | } |
6960 | |
6961 | static inline void finish_cmd(struct CommandList *c) |
6962 | { |
6963 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); |
6964 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI |
6965 | || c->cmd_type == CMD_IOACCEL2)) |
6966 | complete_scsi_command(c); |
6967 | else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) |
6968 | complete(c->waiting); |
6969 | } |
6970 | |
6971 | /* process completion of an indexed ("direct lookup") command */ |
6972 | static inline void process_indexed_cmd(struct ctlr_info *h, |
6973 | u32 raw_tag) |
6974 | { |
6975 | u32 tag_index; |
6976 | struct CommandList *c; |
6977 | |
6978 | tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; |
6979 | if (!bad_tag(h, tag_index, raw_tag)) { |
6980 | c = h->cmd_pool + tag_index; |
6981 | finish_cmd(c); |
6982 | } |
6983 | } |
6984 | |
6985 | /* Some controllers, like p400, will give us one interrupt |
6986 | * after a soft reset, even if we turned interrupts off. |
6987 | * Only need to check for this in the hpsa_xxx_discard_completions |
6988 | * functions. |
6989 | */ |
6990 | static int ignore_bogus_interrupt(struct ctlr_info *h) |
6991 | { |
6992 | if (likely(!reset_devices)) |
6993 | return 0; |
6994 | |
6995 | if (likely(h->interrupts_enabled)) |
6996 | return 0; |
6997 | |
6998 | dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " |
6999 | "(known firmware bug.) Ignoring.\n" ); |
7000 | |
7001 | return 1; |
7002 | } |
7003 | |
7004 | /* |
7005 | * Convert &h->q[x] (passed to interrupt handlers) back to h. |
7006 | * Relies on (h->q[x] == x) being true for x such that |
7007 | * 0 <= x < MAX_REPLY_QUEUES. |
7008 | */ |
7009 | static struct ctlr_info *queue_to_hba(u8 *queue) |
7010 | { |
7011 | return container_of((queue - *queue), struct ctlr_info, q[0]); |
7012 | } |
7013 | |
7014 | static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) |
7015 | { |
7016 | struct ctlr_info *h = queue_to_hba(queue); |
7017 | u8 q = *(u8 *) queue; |
7018 | u32 raw_tag; |
7019 | |
7020 | if (ignore_bogus_interrupt(h)) |
7021 | return IRQ_NONE; |
7022 | |
7023 | if (interrupt_not_for_us(h)) |
7024 | return IRQ_NONE; |
7025 | h->last_intr_timestamp = get_jiffies_64(); |
7026 | while (interrupt_pending(h)) { |
7027 | raw_tag = get_next_completion(h, q); |
7028 | while (raw_tag != FIFO_EMPTY) |
7029 | raw_tag = next_command(h, q); |
7030 | } |
7031 | return IRQ_HANDLED; |
7032 | } |
7033 | |
7034 | static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) |
7035 | { |
7036 | struct ctlr_info *h = queue_to_hba(queue); |
7037 | u32 raw_tag; |
7038 | u8 q = *(u8 *) queue; |
7039 | |
7040 | if (ignore_bogus_interrupt(h)) |
7041 | return IRQ_NONE; |
7042 | |
7043 | h->last_intr_timestamp = get_jiffies_64(); |
7044 | raw_tag = get_next_completion(h, q); |
7045 | while (raw_tag != FIFO_EMPTY) |
7046 | raw_tag = next_command(h, q); |
7047 | return IRQ_HANDLED; |
7048 | } |
7049 | |
7050 | static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) |
7051 | { |
7052 | struct ctlr_info *h = queue_to_hba((u8 *) queue); |
7053 | u32 raw_tag; |
7054 | u8 q = *(u8 *) queue; |
7055 | |
7056 | if (interrupt_not_for_us(h)) |
7057 | return IRQ_NONE; |
7058 | h->last_intr_timestamp = get_jiffies_64(); |
7059 | while (interrupt_pending(h)) { |
7060 | raw_tag = get_next_completion(h, q); |
7061 | while (raw_tag != FIFO_EMPTY) { |
7062 | process_indexed_cmd(h, raw_tag); |
7063 | raw_tag = next_command(h, q); |
7064 | } |
7065 | } |
7066 | return IRQ_HANDLED; |
7067 | } |
7068 | |
7069 | static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) |
7070 | { |
7071 | struct ctlr_info *h = queue_to_hba(queue); |
7072 | u32 raw_tag; |
7073 | u8 q = *(u8 *) queue; |
7074 | |
7075 | h->last_intr_timestamp = get_jiffies_64(); |
7076 | raw_tag = get_next_completion(h, q); |
7077 | while (raw_tag != FIFO_EMPTY) { |
7078 | process_indexed_cmd(h, raw_tag); |
7079 | raw_tag = next_command(h, q); |
7080 | } |
7081 | return IRQ_HANDLED; |
7082 | } |
7083 | |
7084 | /* Send a message CDB to the firmware. Careful, this only works |
7085 | * in simple mode, not performant mode due to the tag lookup. |
7086 | * We only ever use this immediately after a controller reset. |
7087 | */ |
7088 | static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
7089 | unsigned char type) |
7090 | { |
7091 | struct Command { |
7092 | struct CommandListHeader CommandHeader; |
7093 | struct RequestBlock Request; |
7094 | struct ErrDescriptor ErrorDescriptor; |
7095 | }; |
7096 | struct Command *cmd; |
7097 | static const size_t cmd_sz = sizeof(*cmd) + |
7098 | sizeof(cmd->ErrorDescriptor); |
7099 | dma_addr_t paddr64; |
7100 | __le32 paddr32; |
7101 | u32 tag; |
7102 | void __iomem *vaddr; |
7103 | int i, err; |
7104 | |
7105 | vaddr = pci_ioremap_bar(pdev, 0); |
7106 | if (vaddr == NULL) |
7107 | return -ENOMEM; |
7108 | |
7109 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the |
7110 | * CCISS commands, so they must be allocated from the lower 4GiB of |
7111 | * memory. |
7112 | */ |
7113 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
7114 | if (err) { |
7115 | iounmap(vaddr); |
7116 | return err; |
7117 | } |
7118 | |
7119 | cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); |
7120 | if (cmd == NULL) { |
7121 | iounmap(vaddr); |
7122 | return -ENOMEM; |
7123 | } |
7124 | |
7125 | /* This must fit, because of the 32-bit consistent DMA mask. Also, |
7126 | * although there's no guarantee, we assume that the address is at |
7127 | * least 4-byte aligned (most likely, it's page-aligned). |
7128 | */ |
7129 | paddr32 = cpu_to_le32(paddr64); |
7130 | |
7131 | cmd->CommandHeader.ReplyQueue = 0; |
7132 | cmd->CommandHeader.SGList = 0; |
7133 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); |
7134 | cmd->CommandHeader.tag = cpu_to_le64(paddr64); |
7135 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); |
7136 | |
7137 | cmd->Request.CDBLen = 16; |
7138 | cmd->Request.type_attr_dir = |
7139 | TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); |
7140 | cmd->Request.Timeout = 0; /* Don't time out */ |
7141 | cmd->Request.CDB[0] = opcode; |
7142 | cmd->Request.CDB[1] = type; |
7143 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ |
7144 | cmd->ErrorDescriptor.Addr = |
7145 | cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); |
7146 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); |
7147 | |
7148 | writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); |
7149 | |
7150 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
7151 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
7152 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) |
7153 | break; |
7154 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
7155 | } |
7156 | |
7157 | iounmap(vaddr); |
7158 | |
7159 | /* we leak the DMA buffer here ... no choice since the controller could |
7160 | * still complete the command. |
7161 | */ |
7162 | if (i == HPSA_MSG_SEND_RETRY_LIMIT) { |
7163 | dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", |
7164 | opcode, type); |
7165 | return -ETIMEDOUT; |
7166 | } |
7167 | |
7168 | dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); |
7169 | |
7170 | if (tag & HPSA_ERROR_BIT) { |
7171 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", |
7172 | opcode, type); |
7173 | return -EIO; |
7174 | } |
7175 | |
7176 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", |
7177 | opcode, type); |
7178 | return 0; |
7179 | } |
7180 | |
7181 | #define hpsa_noop(p) hpsa_message(p, 3, 0) |
7182 | |
7183 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
7184 | void __iomem *vaddr, u32 use_doorbell) |
7185 | { |
7186 | |
7187 | if (use_doorbell) { |
7188 | /* For everything after the P600, the PCI power state method |
7189 | * of resetting the controller doesn't work, so we have this |
7190 | * other way using the doorbell register. |
7191 | */ |
7192 | dev_info(&pdev->dev, "using doorbell to reset controller\n" ); |
7193 | writel(val: use_doorbell, addr: vaddr + SA5_DOORBELL); |
7194 | |
7195 | /* PMC hardware guys tell us we need a 10 second delay after |
7196 | * doorbell reset and before any attempt to talk to the board |
7197 | * at all to ensure that this actually works and doesn't fall |
7198 | * over in some weird corner cases. |
7199 | */ |
7200 | msleep(msecs: 10000); |
7201 | } else { /* Try to do it the PCI power state way */ |
7202 | |
7203 | /* Quoting from the Open CISS Specification: "The Power |
7204 | * Management Control/Status Register (CSR) controls the power |
7205 | * state of the device. The normal operating state is D0, |
7206 | * CSR=00h. The software off state is D3, CSR=03h. To reset |
7207 | * the controller, place the interface device in D3 then to D0, |
7208 | * this causes a secondary PCI reset which will reset the |
7209 | * controller." */ |
7210 | |
7211 | int rc = 0; |
7212 | |
7213 | dev_info(&pdev->dev, "using PCI PM to reset controller\n" ); |
7214 | |
7215 | /* enter the D3hot power management state */ |
7216 | rc = pci_set_power_state(dev: pdev, PCI_D3hot); |
7217 | if (rc) |
7218 | return rc; |
7219 | |
7220 | msleep(msecs: 500); |
7221 | |
7222 | /* enter the D0 power management state */ |
7223 | rc = pci_set_power_state(dev: pdev, PCI_D0); |
7224 | if (rc) |
7225 | return rc; |
7226 | |
7227 | /* |
7228 | * The P600 requires a small delay when changing states. |
7229 | * Otherwise we may think the board did not reset and we bail. |
7230 | * This for kdump only and is particular to the P600. |
7231 | */ |
7232 | msleep(msecs: 500); |
7233 | } |
7234 | return 0; |
7235 | } |
7236 | |
7237 | static void init_driver_version(char *driver_version, int len) |
7238 | { |
7239 | memset(driver_version, 0, len); |
7240 | strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); |
7241 | } |
7242 | |
7243 | static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) |
7244 | { |
7245 | char *driver_version; |
7246 | int i, size = sizeof(cfgtable->driver_version); |
7247 | |
7248 | driver_version = kmalloc(size, GFP_KERNEL); |
7249 | if (!driver_version) |
7250 | return -ENOMEM; |
7251 | |
7252 | init_driver_version(driver_version, size); |
7253 | for (i = 0; i < size; i++) |
7254 | writeb(driver_version[i], &cfgtable->driver_version[i]); |
7255 | kfree(driver_version); |
7256 | return 0; |
7257 | } |
7258 | |
7259 | static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, |
7260 | unsigned char *driver_ver) |
7261 | { |
7262 | int i; |
7263 | |
7264 | for (i = 0; i < sizeof(cfgtable->driver_version); i++) |
7265 | driver_ver[i] = readb(&cfgtable->driver_version[i]); |
7266 | } |
7267 | |
7268 | static int controller_reset_failed(struct CfgTable __iomem *cfgtable) |
7269 | { |
7270 | |
7271 | char *driver_ver, *old_driver_ver; |
7272 | int rc, size = sizeof(cfgtable->driver_version); |
7273 | |
7274 | old_driver_ver = kmalloc_array(2, size, GFP_KERNEL); |
7275 | if (!old_driver_ver) |
7276 | return -ENOMEM; |
7277 | driver_ver = old_driver_ver + size; |
7278 | |
7279 | /* After a reset, the 32 bytes of "driver version" in the cfgtable |
7280 | * should have been changed, otherwise we know the reset failed. |
7281 | */ |
7282 | init_driver_version(old_driver_ver, size); |
7283 | read_driver_ver_from_cfgtable(cfgtable, driver_ver); |
7284 | rc = !memcmp(driver_ver, old_driver_ver, size); |
7285 | kfree(old_driver_ver); |
7286 | return rc; |
7287 | } |
7288 | /* This does a hard reset of the controller using PCI power management |
7289 | * states or the doorbell register. |
7290 | */ |
7291 | static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) |
7292 | { |
7293 | u64 cfg_offset; |
7294 | u32 cfg_base_addr; |
7295 | u64 cfg_base_addr_index; |
7296 | void __iomem *vaddr; |
7297 | unsigned long paddr; |
7298 | u32 misc_fw_support; |
7299 | int rc; |
7300 | struct CfgTable __iomem *cfgtable; |
7301 | u32 use_doorbell; |
7302 | u16 command_register; |
7303 | |
7304 | /* For controllers as old as the P600, this is very nearly |
7305 | * the same thing as |
7306 | * |
7307 | * pci_save_state(pci_dev); |
7308 | * pci_set_power_state(pci_dev, PCI_D3hot); |
7309 | * pci_set_power_state(pci_dev, PCI_D0); |
7310 | * pci_restore_state(pci_dev); |
7311 | * |
7312 | * For controllers newer than the P600, the pci power state |
7313 | * method of resetting doesn't work so we have another way |
7314 | * using the doorbell register. |
7315 | */ |
7316 | |
7317 | if (!ctlr_is_resettable(board_id)) { |
7318 | dev_warn(&pdev->dev, "Controller not resettable\n" ); |
7319 | return -ENODEV; |
7320 | } |
7321 | |
7322 | /* if controller is soft- but not hard resettable... */ |
7323 | if (!ctlr_is_hard_resettable(board_id)) |
7324 | return -ENOTSUPP; /* try soft reset later. */ |
7325 | |
7326 | /* Save the PCI command register */ |
7327 | pci_read_config_word(pdev, 4, &command_register); |
7328 | pci_save_state(pdev); |
7329 | |
7330 | /* find the first memory BAR, so we can find the cfg table */ |
7331 | rc = hpsa_pci_find_memory_BAR(pdev, &paddr); |
7332 | if (rc) |
7333 | return rc; |
7334 | vaddr = remap_pci_mem(paddr, 0x250); |
7335 | if (!vaddr) |
7336 | return -ENOMEM; |
7337 | |
7338 | /* find cfgtable in order to check if reset via doorbell is supported */ |
7339 | rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, |
7340 | &cfg_base_addr_index, &cfg_offset); |
7341 | if (rc) |
7342 | goto unmap_vaddr; |
7343 | cfgtable = remap_pci_mem(pci_resource_start(pdev, |
7344 | cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); |
7345 | if (!cfgtable) { |
7346 | rc = -ENOMEM; |
7347 | goto unmap_vaddr; |
7348 | } |
7349 | rc = write_driver_ver_to_cfgtable(cfgtable); |
7350 | if (rc) |
7351 | goto unmap_cfgtable; |
7352 | |
7353 | /* If reset via doorbell register is supported, use that. |
7354 | * There are two such methods. Favor the newest method. |
7355 | */ |
7356 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
7357 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; |
7358 | if (use_doorbell) { |
7359 | use_doorbell = DOORBELL_CTLR_RESET2; |
7360 | } else { |
7361 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
7362 | if (use_doorbell) { |
7363 | dev_warn(&pdev->dev, |
7364 | "Soft reset not supported. Firmware update is required.\n"); |
7365 | rc = -ENOTSUPP; /* try soft reset */ |
7366 | goto unmap_cfgtable; |
7367 | } |
7368 | } |
7369 | |
7370 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
7371 | if (rc) |
7372 | goto unmap_cfgtable; |
7373 | |
7374 | pci_restore_state(pdev); |
7375 | pci_write_config_word(pdev, 4, command_register); |
7376 | |
7377 | /* Some devices (notably the HP Smart Array 5i Controller) |
7378 | need a little pause here */ |
7379 | msleep(HPSA_POST_RESET_PAUSE_MSECS); |
7380 | |
7381 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); |
7382 | if (rc) { |
7383 | dev_warn(&pdev->dev, |
7384 | "Failed waiting for board to become ready after hard reset\n"); |
7385 | goto unmap_cfgtable; |
7386 | } |
7387 | |
7388 | rc = controller_reset_failed(vaddr); |
7389 | if (rc < 0) |
7390 | goto unmap_cfgtable; |
7391 | if (rc) { |
7392 | dev_warn(&pdev->dev, "Unable to successfully reset " |
7393 | "controller. Will try soft reset.\n" ); |
7394 | rc = -ENOTSUPP; |
7395 | } else { |
7396 | dev_info(&pdev->dev, "board ready after hard reset.\n" ); |
7397 | } |
7398 | |
7399 | unmap_cfgtable: |
7400 | iounmap(cfgtable); |
7401 | |
7402 | unmap_vaddr: |
7403 | iounmap(vaddr); |
7404 | return rc; |
7405 | } |
7406 | |
7407 | /* |
7408 | * We cannot read the structure directly, for portability we must use |
7409 | * the io functions. |
7410 | * This is for debug only. |
7411 | */ |
7412 | static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) |
7413 | { |
7414 | #ifdef HPSA_DEBUG |
7415 | int i; |
7416 | char temp_name[17]; |
7417 | |
7418 | dev_info(dev, "Controller Configuration information\n" ); |
7419 | dev_info(dev, "------------------------------------\n" ); |
7420 | for (i = 0; i < 4; i++) |
7421 | temp_name[i] = readb(&(tb->Signature[i])); |
7422 | temp_name[4] = '\0'; |
7423 | dev_info(dev, " Signature = %s\n" , temp_name); |
7424 | dev_info(dev, " Spec Number = %d\n" , readl(&(tb->SpecValence))); |
7425 | dev_info(dev, " Transport methods supported = 0x%x\n" , |
7426 | readl(&(tb->TransportSupport))); |
7427 | dev_info(dev, " Transport methods active = 0x%x\n" , |
7428 | readl(&(tb->TransportActive))); |
7429 | dev_info(dev, " Requested transport Method = 0x%x\n" , |
7430 | readl(&(tb->HostWrite.TransportRequest))); |
7431 | dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n" , |
7432 | readl(&(tb->HostWrite.CoalIntDelay))); |
7433 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n" , |
7434 | readl(&(tb->HostWrite.CoalIntCount))); |
7435 | dev_info(dev, " Max outstanding commands = %d\n" , |
7436 | readl(&(tb->CmdsOutMax))); |
7437 | dev_info(dev, " Bus Types = 0x%x\n" , readl(&(tb->BusTypes))); |
7438 | for (i = 0; i < 16; i++) |
7439 | temp_name[i] = readb(&(tb->ServerName[i])); |
7440 | temp_name[16] = '\0'; |
7441 | dev_info(dev, " Server Name = %s\n" , temp_name); |
7442 | dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n" , |
7443 | readl(&(tb->HeartBeat))); |
7444 | #endif /* HPSA_DEBUG */ |
7445 | } |
7446 | |
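/*
 * Translate a BAR's config-space offset (relative to PCI_BASE_ADDRESS_0)
 * into a PCI resource index, walking the BARs and accounting for the
 * fact that a 64-bit memory BAR occupies two 32-bit registers.  For
 * example, if BAR 0 is 64-bit it consumes config offsets 0x10 and 0x14,
 * so the next BAR lives at offset 8 and maps to resource index 1.
 * Returns the resource index, or -1 on failure.
 */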
7447 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) |
7448 | { |
7449 | int i, offset, mem_type, bar_type; |
7450 | |
7451 | if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ |
7452 | return 0; |
7453 | offset = 0; |
7454 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
7455 | bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; |
7456 | if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) |
7457 | offset += 4; |
7458 | else { |
7459 | mem_type = pci_resource_flags(pdev, i) & |
7460 | PCI_BASE_ADDRESS_MEM_TYPE_MASK; |
7461 | switch (mem_type) { |
7462 | case PCI_BASE_ADDRESS_MEM_TYPE_32: |
7463 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: |
7464 | offset += 4; /* 32 bit */ |
7465 | break; |
7466 | case PCI_BASE_ADDRESS_MEM_TYPE_64: |
7467 | offset += 8; |
7468 | break; |
7469 | default: /* reserved in PCI 2.2 */ |
7470 | dev_warn(&pdev->dev, |
7471 | "base address is invalid\n"); |
7472 | return -1; |
7473 | } |
7474 | } |
7475 | if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) |
7476 | return i + 1; |
7477 | } |
7478 | return -1; |
7479 | } |
7480 | |
7481 | static void hpsa_disable_interrupt_mode(struct ctlr_info *h) |
7482 | { |
7483 | pci_free_irq_vectors(h->pdev); |
7484 | h->msix_vectors = 0; |
7485 | } |
7486 | |
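/*
 * Build the CPU -> reply queue map from the MSI-X affinity masks so
 * that command submission can target the reply queue whose interrupt
 * is bound to the submitting CPU.  If any affinity mask is unavailable
 * (e.g. fewer vectors than expected), fall back to queue 0 for all
 * CPUs.
 */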
7487 | static void hpsa_setup_reply_map(struct ctlr_info *h) |
7488 | { |
7489 | const struct cpumask *mask; |
7490 | unsigned int queue, cpu; |
7491 | |
7492 | for (queue = 0; queue < h->msix_vectors; queue++) { |
7493 | mask = pci_irq_get_affinity(h->pdev, queue); |
7494 | if (!mask) |
7495 | goto fallback; |
7496 | |
7497 | for_each_cpu(cpu, mask) |
7498 | h->reply_map[cpu] = queue; |
7499 | } |
7500 | return; |
7501 | |
7502 | fallback: |
7503 | for_each_possible_cpu(cpu) |
7504 | h->reply_map[cpu] = 0; |
7505 | } |
7506 | |
7507 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
7508 | * controllers that are capable. If not, we use legacy INTx mode. |
7509 | */ |
7510 | static int hpsa_interrupt_mode(struct ctlr_info *h) |
7511 | { |
7512 | unsigned int flags = PCI_IRQ_LEGACY; |
7513 | int ret; |
7514 | |
7515 | /* Some boards advertise MSI but don't really support it */ |
7516 | switch (h->board_id) { |
7517 | case 0x40700E11: |
7518 | case 0x40800E11: |
7519 | case 0x40820E11: |
7520 | case 0x40830E11: |
7521 | break; |
7522 | default: |
7523 | ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, |
7524 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); |
7525 | if (ret > 0) { |
7526 | h->msix_vectors = ret; |
7527 | return 0; |
7528 | } |
7529 | |
7530 | flags |= PCI_IRQ_MSI; |
7531 | break; |
7532 | } |
7533 | |
7534 | ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); |
7535 | if (ret < 0) |
7536 | return ret; |
7537 | return 0; |
7538 | } |
7539 | |
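/*
 * Identify the board by combining the PCI subsystem device and vendor
 * IDs into a single 32-bit value: device ID in the high 16 bits,
 * vendor ID in the low 16 bits.  As an illustrative example, a
 * subsystem device ID of 0x3241 with subsystem vendor ID 0x103C packs
 * to a board_id of 0x3241103C.
 */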
7540 | static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, |
7541 | bool *legacy_board) |
7542 | { |
7543 | int i; |
7544 | u32 subsystem_vendor_id, subsystem_device_id; |
7545 | |
7546 | subsystem_vendor_id = pdev->subsystem_vendor; |
7547 | subsystem_device_id = pdev->subsystem_device; |
7548 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | |
7549 | subsystem_vendor_id; |
7550 | |
7551 | if (legacy_board) |
7552 | *legacy_board = false; |
7553 | for (i = 0; i < ARRAY_SIZE(products); i++) |
7554 | if (*board_id == products[i].board_id) { |
7555 | if (products[i].access != &SA5A_access && |
7556 | products[i].access != &SA5B_access) |
7557 | return i; |
7558 | dev_warn(&pdev->dev, |
7559 | "legacy board ID: 0x%08x\n", |
7560 | *board_id); |
7561 | if (legacy_board) |
7562 | *legacy_board = true; |
7563 | return i; |
7564 | } |
7565 | |
7566 | dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n" , *board_id); |
7567 | if (legacy_board) |
7568 | *legacy_board = true; |
7569 | return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ |
7570 | } |
7571 | |
7572 | static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
7573 | unsigned long *memory_bar) |
7574 | { |
7575 | int i; |
7576 | |
7577 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
7578 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { |
7579 | /* addressing mode bits already removed */ |
7580 | *memory_bar = pci_resource_start(pdev, i); |
7581 | dev_dbg(&pdev->dev, "memory BAR = %lx\n" , |
7582 | *memory_bar); |
7583 | return 0; |
7584 | } |
7585 | dev_warn(&pdev->dev, "no memory BAR found\n" ); |
7586 | return -ENODEV; |
7587 | } |
7588 | |
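/*
 * Poll the scratchpad register until the firmware reports the
 * requested state (ready or not-ready), sleeping between reads.
 * Returns 0 once the state is reached, -ENODEV on timeout.
 */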
7589 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
7590 | int wait_for_ready) |
7591 | { |
7592 | int i, iterations; |
7593 | u32 scratchpad; |
7594 | if (wait_for_ready) |
7595 | iterations = HPSA_BOARD_READY_ITERATIONS; |
7596 | else |
7597 | iterations = HPSA_BOARD_NOT_READY_ITERATIONS; |
7598 | |
7599 | for (i = 0; i < iterations; i++) { |
7600 | scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); |
7601 | if (wait_for_ready) { |
7602 | if (scratchpad == HPSA_FIRMWARE_READY) |
7603 | return 0; |
7604 | } else { |
7605 | if (scratchpad != HPSA_FIRMWARE_READY) |
7606 | return 0; |
7607 | } |
7608 | msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); |
7609 | } |
7610 | dev_warn(&pdev->dev, "board not ready, timed out.\n" ); |
7611 | return -ENODEV; |
7612 | } |
7613 | |
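/*
 * Read where the CISS config table lives: a BAR reference and an
 * offset within it, taken from two controller registers.  The BAR
 * reference is converted to a PCI resource index so the caller can
 * combine index and offset to map the table itself.
 */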
7614 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
7615 | u32 *cfg_base_addr, u64 *cfg_base_addr_index, |
7616 | u64 *cfg_offset) |
7617 | { |
7618 | *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); |
7619 | *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); |
7620 | *cfg_base_addr &= (u32) 0x0000ffff; |
7621 | *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); |
7622 | if (*cfg_base_addr_index == -1) { |
7623 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); |
7624 | return -ENODEV; |
7625 | } |
7626 | return 0; |
7627 | } |
7628 | |
7629 | static void hpsa_free_cfgtables(struct ctlr_info *h) |
7630 | { |
7631 | if (h->transtable) { |
7632 | iounmap(h->transtable); |
7633 | h->transtable = NULL; |
7634 | } |
7635 | if (h->cfgtable) { |
7636 | iounmap(h->cfgtable); |
7637 | h->cfgtable = NULL; |
7638 | } |
7639 | } |
7640 | |
7641 | /* Find and map CISS config table and transfer table |
7642 | * several items must be unmapped (freed) later |
7643 | */ |
7644 | static int hpsa_find_cfgtables(struct ctlr_info *h) |
7645 | { |
7646 | u64 cfg_offset; |
7647 | u32 cfg_base_addr; |
7648 | u64 cfg_base_addr_index; |
7649 | u32 trans_offset; |
7650 | int rc; |
7651 | |
7652 | rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, |
7653 | &cfg_base_addr_index, &cfg_offset); |
7654 | if (rc) |
7655 | return rc; |
7656 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
7657 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
7658 | if (!h->cfgtable) { |
7659 | dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); |
7660 | return -ENOMEM; |
7661 | } |
7662 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
7663 | if (rc) |
7664 | return rc; |
7665 | /* Find performant mode table. */ |
7666 | trans_offset = readl(&h->cfgtable->TransMethodOffset); |
7667 | h->transtable = remap_pci_mem(pci_resource_start(h->pdev, |
7668 | cfg_base_addr_index)+cfg_offset+trans_offset, |
7669 | sizeof(*h->transtable)); |
7670 | if (!h->transtable) { |
7671 | dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); |
7672 | hpsa_free_cfgtables(h); |
7673 | return -ENOMEM; |
7674 | } |
7675 | return 0; |
7676 | } |
7677 | |
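/*
 * Read the controller's advertised performant-mode command limit,
 * clamping it for memory-constrained kdump kernels and enforcing a
 * floor of MIN_MAX_COMMANDS for controllers with stale firmware.
 */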
7678 | static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) |
7679 | { |
7680 | #define MIN_MAX_COMMANDS 16 |
7681 | BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); |
7682 | |
7683 | h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); |
7684 | |
7685 | /* Limit commands in memory limited kdump scenario. */ |
7686 | if (reset_devices && h->max_commands > 32) |
7687 | h->max_commands = 32; |
7688 | |
7689 | if (h->max_commands < MIN_MAX_COMMANDS) { |
7690 | dev_warn(&h->pdev->dev, |
7691 | "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n", |
7692 | h->max_commands, |
7693 | MIN_MAX_COMMANDS); |
7694 | h->max_commands = MIN_MAX_COMMANDS; |
7695 | } |
7696 | } |
7697 | |
7698 | /* If the controller reports that the total max sg entries is greater than 512, |
7699 | * then we know that chained SG blocks work. (Original smart arrays did not |
7700 | * support chained SG blocks and would return zero for max sg entries.) |
7701 | */ |
7702 | static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) |
7703 | { |
7704 | return h->maxsgentries > 512; |
7705 | } |
7706 | |
7707 | /* Interrogate the hardware for some limits: |
7708 | * max commands, max SG elements without chaining, and with chaining, |
7709 | * SG chain block size, etc. |
7710 | */ |
7711 | static void hpsa_find_board_params(struct ctlr_info *h) |
7712 | { |
7713 | hpsa_get_max_perf_mode_cmds(h); |
7714 | h->nr_cmds = h->max_commands; |
7715 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); |
7716 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); |
7717 | if (hpsa_supports_chained_sg_blocks(h)) { |
7718 | /* Limit in-command s/g elements to 32 to save DMA'able memory. */ |
7719 | h->max_cmd_sg_entries = 32; |
7720 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; |
7721 | h->maxsgentries--; /* save one for chain pointer */ |
7722 | } else { |
7723 | /* |
7724 | * Original smart arrays supported at most 31 s/g entries |
7725 | * embedded inline in the command (trying to use more |
7726 | * would lock up the controller) |
7727 | */ |
7728 | h->max_cmd_sg_entries = 31; |
7729 | h->maxsgentries = 31; /* default to traditional values */ |
7730 | h->chainsize = 0; |
7731 | } |
7732 | |
7733 | /* Find out what task management functions are supported and cache */ |
7734 | h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); |
7735 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) |
7736 | dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); |
7737 | if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) |
7738 | dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); |
7739 | if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) |
7740 | dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); |
7741 | } |
7742 | |
7743 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
7744 | { |
7745 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { |
7746 | dev_err(&h->pdev->dev, "not a valid CISS config table\n"); |
7747 | return false; |
7748 | } |
7749 | return true; |
7750 | } |
7751 | |
7752 | static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) |
7753 | { |
7754 | u32 driver_support; |
7755 | |
7756 | driver_support = readl(&(h->cfgtable->driver_support)); |
7757 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
7758 | #ifdef CONFIG_X86 |
7759 | driver_support |= ENABLE_SCSI_PREFETCH; |
7760 | #endif |
7761 | driver_support |= ENABLE_UNIT_ATTN; |
7762 | writel(driver_support, &(h->cfgtable->driver_support)); |
7763 | } |
7764 | |
7765 | /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result |
7766 | * in a prefetch beyond physical memory. |
7767 | */ |
7768 | static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) |
7769 | { |
7770 | u32 dma_prefetch; |
7771 | |
7772 | if (h->board_id != 0x3225103C) |
7773 | return; |
7774 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); |
7775 | dma_prefetch |= 0x8000; |
7776 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
7777 | } |
7778 | |
7779 | static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) |
7780 | { |
7781 | int i; |
7782 | u32 doorbell_value; |
7783 | unsigned long flags; |
7784 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ |
7785 | for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { |
7786 | spin_lock_irqsave(&h->lock, flags); |
7787 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
7788 | spin_unlock_irqrestore(&h->lock, flags); |
7789 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) |
7790 | goto done; |
7791 | /* delay and try again */ |
7792 | msleep(CLEAR_EVENT_WAIT_INTERVAL); |
7793 | } |
7794 | return -ENODEV; |
7795 | done: |
7796 | return 0; |
7797 | } |
7798 | |
7799 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
7800 | { |
7801 | int i; |
7802 | u32 doorbell_value; |
7803 | unsigned long flags; |
7804 | |
7805 | /* under certain very rare conditions, this can take a while. |
7806 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right |
7807 | * as we enter this code.) |
7808 | */ |
7809 | for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { |
7810 | if (h->remove_in_progress) |
7811 | goto done; |
7812 | spin_lock_irqsave(&h->lock, flags); |
7813 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
7814 | spin_unlock_irqrestore(&h->lock, flags); |
7815 | if (!(doorbell_value & CFGTBL_ChangeReq)) |
7816 | goto done; |
7817 | /* delay and try again */ |
7818 | msleep(MODE_CHANGE_WAIT_INTERVAL); |
7819 | } |
7820 | return -ENODEV; |
7821 | done: |
7822 | return 0; |
7823 | } |
7824 | |
7825 | /* return -ENODEV or other reason on error, 0 on success */ |
7826 | static int hpsa_enter_simple_mode(struct ctlr_info *h) |
7827 | { |
7828 | u32 trans_support; |
7829 | |
7830 | trans_support = readl(&(h->cfgtable->TransportSupport)); |
7831 | if (!(trans_support & SIMPLE_MODE)) |
7832 | return -ENOTSUPP; |
7833 | |
7834 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); |
7835 | |
7836 | /* Update the field, and then ring the doorbell */ |
7837 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); |
7838 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); |
7839 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
7840 | if (hpsa_wait_for_mode_change_ack(h)) |
7841 | goto error; |
7842 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
7843 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) |
7844 | goto error; |
7845 | h->transMethod = CFGTBL_Trans_Simple; |
7846 | return 0; |
7847 | error: |
7848 | dev_err(&h->pdev->dev, "failed to enter simple mode\n"); |
7849 | return -ENODEV; |
7850 | } |
7851 | |
7852 | /* free items allocated or mapped by hpsa_pci_init */ |
7853 | static void hpsa_free_pci_init(struct ctlr_info *h) |
7854 | { |
7855 | hpsa_free_cfgtables(h); /* pci_init 4 */ |
7856 | iounmap(h->vaddr); /* pci_init 3 */ |
7857 | h->vaddr = NULL; |
7858 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ |
7859 | /* |
7860 | * call pci_disable_device before pci_release_regions per |
7861 | * Documentation/driver-api/pci/pci.rst |
7862 | */ |
7863 | pci_disable_device(h->pdev); /* pci_init 1 */ |
7864 | pci_release_regions(h->pdev); /* pci_init 2 */ |
7865 | } |
7866 | |
7867 | /* several items must be freed later */ |
7868 | static int hpsa_pci_init(struct ctlr_info *h) |
7869 | { |
7870 | int prod_index, err; |
7871 | bool legacy_board; |
7872 | |
7873 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); |
7874 | if (prod_index < 0) |
7875 | return prod_index; |
7876 | h->product_name = products[prod_index].product_name; |
7877 | h->access = *(products[prod_index].access); |
7878 | h->legacy_board = legacy_board; |
7879 | pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | |
7880 | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); |
7881 | |
7882 | err = pci_enable_device(h->pdev); |
7883 | if (err) { |
7884 | dev_err(&h->pdev->dev, "failed to enable PCI device\n"); |
7885 | pci_disable_device(h->pdev); |
7886 | return err; |
7887 | } |
7888 | |
7889 | err = pci_request_regions(h->pdev, HPSA); |
7890 | if (err) { |
7891 | dev_err(&h->pdev->dev, |
7892 | "failed to obtain PCI resources\n"); |
7893 | pci_disable_device(h->pdev); |
7894 | return err; |
7895 | } |
7896 | |
7897 | pci_set_master(h->pdev); |
7898 | |
7899 | err = hpsa_interrupt_mode(h); |
7900 | if (err) |
7901 | goto clean1; |
7902 | |
7903 | /* setup mapping between CPU and reply queue */ |
7904 | hpsa_setup_reply_map(h); |
7905 | |
7906 | err = hpsa_pci_find_memory_BAR(pdev: h->pdev, memory_bar: &h->paddr); |
7907 | if (err) |
7908 | goto clean2; /* intmode+region, pci */ |
7909 | h->vaddr = remap_pci_mem(base: h->paddr, size: 0x250); |
7910 | if (!h->vaddr) { |
7911 | dev_err(&h->pdev->dev, "failed to remap PCI mem\n" ); |
7912 | err = -ENOMEM; |
7913 | goto clean2; /* intmode+region, pci */ |
7914 | } |
7915 | err = hpsa_wait_for_board_state(pdev: h->pdev, vaddr: h->vaddr, BOARD_READY); |
7916 | if (err) |
7917 | goto clean3; /* vaddr, intmode+region, pci */ |
7918 | err = hpsa_find_cfgtables(h); |
7919 | if (err) |
7920 | goto clean3; /* vaddr, intmode+region, pci */ |
7921 | hpsa_find_board_params(h); |
7922 | |
7923 | if (!hpsa_CISS_signature_present(h)) { |
7924 | err = -ENODEV; |
7925 | goto clean4; /* cfgtables, vaddr, intmode+region, pci */ |
7926 | } |
7927 | hpsa_set_driver_support_bits(h); |
7928 | hpsa_p600_dma_prefetch_quirk(h); |
7929 | err = hpsa_enter_simple_mode(h); |
7930 | if (err) |
7931 | goto clean4; /* cfgtables, vaddr, intmode+region, pci */ |
7932 | return 0; |
7933 | |
7934 | clean4: /* cfgtables, vaddr, intmode+region, pci */ |
7935 | hpsa_free_cfgtables(h); |
7936 | clean3: /* vaddr, intmode+region, pci */ |
7937 | iounmap(addr: h->vaddr); |
7938 | h->vaddr = NULL; |
7939 | clean2: /* intmode+region, pci */ |
7940 | hpsa_disable_interrupt_mode(h); |
7941 | clean1: |
7942 | /* |
7943 | * call pci_disable_device before pci_release_regions per |
7944 | * Documentation/driver-api/pci/pci.rst |
7945 | */ |
7946 | pci_disable_device(dev: h->pdev); |
7947 | pci_release_regions(h->pdev); |
7948 | return err; |
7949 | } |
7950 | |
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* A kdump kernel is loading, and we don't know what state the
	 * PCI interface is in.  dev->enable_cnt is equal to zero, so we
	 * call enable+disable, wait a while, and then switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	bitmap_free(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		dma_free_coherent(&h->pdev->dev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}

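/*
 * Allocate the command pool: a bitmap that tracks which command slots are
 * in use, plus DMA-coherent arrays of CommandList and ErrorInfo structures,
 * one entry of each per h->nr_cmds.
 */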
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
	h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &h->cmd_pool_dhandle, GFP_KERNEL);
	h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &h->errinfo_pool_dhandle, GFP_KERNEL);
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}

/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;
	int irq_vector = 0;

	if (hpsa_simple_mode)
		irq_vector = h->intr_mode;

	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		free_irq(pci_irq_vector(h->pdev, irq_vector),
				&h->q[h->intr_mode]);
		h->q[h->intr_mode] = 0;
		return;
	}

	for (i = 0; i < h->msix_vectors; i++) {
		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}

/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;
	int irq_vector = 0;

	if (hpsa_simple_mode)
		irq_vector = h->intr_mode;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vectors; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       pci_irq_vector(h->pdev, i), h->devname);
				for (j = 0; j < i; j++) {
					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
	} else {
		/* Use single reply pool */
		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
			sprintf(h->intrname[0], "%s-msi%s", h->devname,
				h->msix_vectors ? "x" : "");
			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
				msixhandler, 0,
				h->intrname[0],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
				intxhandler, IRQF_SHARED,
				h->intrname[0],
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       pci_irq_vector(h->pdev, irq_vector), h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}

static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return rc;
	}

	return 0;
}

static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		dma_free_coherent(&h->pdev->dev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	if (h->monitor_ctlr_wq) {
		destroy_workqueue(h->monitor_ctlr_wq);
		h->monitor_ctlr_wq = NULL;
	}

	kfree(h);				/* init_one 1 */
}

/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	if (lockup_detected == 0xffff0000) {
		dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
		writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
	}
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}

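/*
 * Returns true if the controller's heartbeat register has not advanced
 * within the heartbeat sample interval (in which case all outstanding
 * commands are failed); returns false if the controller appears alive.
 */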
static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}


/*
 * Set ioaccel status for all ioaccel volumes.
 *
 * Called from monitor controller worker (hpsa_event_monitor_worker)
 *
 * A Volume (or Volumes that comprise an Array set) may be undergoing a
 * transformation, so we will be turning off ioaccel for all volumes that
 * make up the Array.
 */
static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
	int rc;
	int i;
	u8 ioaccel_status;
	unsigned char *buf;
	struct hpsa_scsi_dev_t *device;

	if (!h)
		return;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	/*
	 * Run through current device list used during I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		int offload_to_be_enabled = 0;
		int offload_config = 0;

		device = h->dev[i];

		if (!device)
			continue;
		if (!hpsa_vpd_page_supported(h, device->scsi3addr,
						HPSA_VPD_LV_IOACCEL_STATUS))
			continue;

		memset(buf, 0, 64);

		rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
				VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
				buf, 64);
		if (rc != 0)
			continue;

		ioaccel_status = buf[IOACCEL_STATUS_BYTE];

		/*
		 * Check if offload is still configured on
		 */
		offload_config =
				!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
		/*
		 * If offload is configured on, check to see if ioaccel
		 * needs to be enabled.
		 */
		if (offload_config)
			offload_to_be_enabled =
				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);

		/*
		 * If ioaccel is to be re-enabled, re-enable later during the
		 * scan operation so the driver can get a fresh raidmap
		 * before turning ioaccel back on.
		 */
		if (offload_to_be_enabled)
			continue;

		/*
		 * Immediately turn off ioaccel for any volume the
		 * controller tells us to. Some of the reasons could be:
		 *    transformation - change to the LVs of an Array.
		 *    degraded volume - component failure
		 */
		hpsa_turn_off_ioaccel_for_device(device);
	}

	kfree(buf);
}

static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		hpsa_set_ioaccel_status(h);
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
	}
	return;
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
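		/*
		 * Drop the lock while checking the volume: hpsa_volume_offline
		 * issues commands to the controller and may sleep, which we
		 * cannot do while holding a spinlock.
		 */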
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */

	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev)
		return rc;

	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}

static void hpsa_perform_rescan(struct ctlr_info *h)
{
	struct Scsi_Host *sh = NULL;
	unsigned long flags;

	/*
	 * Do the scan after the reset
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	sh = scsi_host_get(h->scsi_host);
	if (sh != NULL) {
		hpsa_scan_start(sh);
		scsi_host_put(sh);
		h->drv_req_rescan = 0;
	}
}

/*
 * watch for controller events
 */
static void hpsa_event_monitor_worker(struct work_struct *work)
{
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, event_monitor_work);
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (hpsa_ctlr_needs_rescan(h)) {
		hpsa_ack_ctlr_events(h);
		hpsa_perform_rescan(h);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
				HPSA_EVENT_MONITOR_INTERVAL);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
		hpsa_perform_rescan(h);
	} else if (h->discovery_polling) {
		if (hpsa_luns_changed(h)) {
			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			hpsa_perform_rescan(h);
		}
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}

static void hpda_free_ctlr_info(struct ctlr_info *h)
{
	kfree(h->reply_map);
	kfree(h);
}

static struct ctlr_info *hpda_alloc_ctlr_info(void)
{
	struct ctlr_info *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
	if (!h->reply_map) {
		kfree(h);
		return NULL;
	}
	return h;
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = hpda_alloc_ctlr_info();
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->reset_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc != 0) {
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc != 0) {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */
	h->scan_waiting = 0;

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
	if (!h->monitor_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross. We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid. So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time. Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling. */
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean8;	/* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
	schedule_delayed_work(&h->event_monitor_work,
				HPSA_EVENT_MONITOR_INTERVAL);
	return 0;

clean8:	/* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
	kfree(h->lastlogicals);
clean7:	/* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6:	/* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5:	/* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4:	/* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3:	/* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5:	/* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2:	/* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	if (h->monitor_ctlr_wq) {
		destroy_workqueue(h->monitor_ctlr_wq);
		h->monitor_ctlr_wq = NULL;
	}
	hpda_free_ctlr_info(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
					DEFAULT_TIMEOUT);
	if (rc)
		goto out;
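	/*
	 * Note the unusual control flow here: the "out" label sits inside
	 * the if-body below, so a fill_cmd failure, a send failure, and a
	 * non-zero CommandStatus all converge on the same warning before
	 * falling through to the cleanup.
	 */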
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}

/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(h->lockup_detected))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}

static void __hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupts, and send the flush...
	 * to write all data in the battery backed cache to disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	__hpsa_shutdown(pdev);
	pci_disable_device(pdev);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	cancel_delayed_work_sync(&h->event_monitor_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);
	destroy_workqueue(h->monitor_ctlr_wq);

	hpsa_delete_sas_host(h);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	if (h->scsi_host)
		scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	__hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);		/* init_one 10 */
	h->hba_inquiry_data = NULL;		/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);			/* init_one 2.5 */

	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */

	hpda_free_ctlr_info(h);			/* init_one 1 */
}

static int __maybe_unused hpsa_suspend(
	__attribute__((unused)) struct device *dev)
{
	return -ENOSYS;
}

static int __maybe_unused hpsa_resume(
	__attribute__((unused)) struct device *dev)
{
	return -ENOSYS;
}

static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.driver.pm = &hpsa_pm_ops,
};


/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
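
/*
 * Worked example (performant mode, where min_blocks is 4 and bucket[] is
 * bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}): a command
 * with 3 SG entries occupies 3 + 4 = 7 sixteen-byte blocks, so the
 * smallest sufficient bucket is bucket[2] == 8 and bucket_map[3] = 2;
 * the controller then fetches 8 * 16 = 128 bytes for that command.
 */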

/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be. It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
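	/*
	 * For example: a request with 2 SG entries needs 4 + 2 = 6 blocks,
	 * which falls into the second bucket above (6), so the 3 bucket
	 * bits encoded in its tag select register 1 and the controller
	 * fetches only 6 * 16 = 96 bytes of the command.
	 */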

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2)
		access = SA5_ioaccel_mode2_access;
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;

		hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
	    (h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
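
/*
 * Note that the clean_up path above simply reuses the free routine:
 * the free functions in this file null-check each resource (and
 * kfree(NULL) is a no-op), so they are safe to call on a partially
 * constructed ctlr_info, and double-frees are avoided by resetting
 * each pointer to NULL after it is released.
 */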

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* Returns 0 on success (or when no action is needed), negative errno
 * on error.  Allocates numerous items that must be freed later.
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
						h->reply_queue_size,
						&h->reply_queue[i].busaddr,
						GFP_KERNEL);
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
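
/*
 * The clean2/clean1 labels above implement the usual kernel unwind
 * idiom: each label frees exactly the resources acquired before the
 * failing step and then falls through to the label below it, so every
 * error exit releases the full set of allocations in reverse order.
 * The comments on the labels list what is freed from that point down
 * ("bft" = block fetch table, "rq" = reply queues).
 */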

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
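
/*
 * How the drain loop above works: atomic_inc_return() on c->refcount
 * is used as a non-destructive probe.  A free command idles at
 * refcount 0, so a post-increment value greater than 1 means some
 * other path currently owns the command; if that command is an
 * ioaccel command it is counted as still in flight.  cmd_free() then
 * drops the probe's reference.  The loop re-scans the whole pool every
 * 100 ms until no accelerated commands remain outstanding.
 */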

static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	sas_phy_delete(phy);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}
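
/*
 * Registration order matters here: the sas_identify fields must be
 * filled in before sas_phy_add() exposes the phy through the SAS
 * transport class, and only after that succeeds is the phy linked into
 * the port (sas_port_add_phy()) and onto the driver's own phy list.
 * added_to_port is what lets hpsa_free_sas_phy() know whether the
 * list_del() is needed during teardown.
 */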

static int
hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}
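
/*
 * sas_rphy_add() is the step that makes the remote device visible
 * through the SAS transport class: the rphy, whose identify mirrors
 * the owning port's SAS address, is registered in sysfs and becomes
 * eligible for SCSI target scanning beneath it.
 */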

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}
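
/*
 * Teardown is hierarchical: freeing a node walks its port list, and
 * freeing each port walks its phy list, so hpsa_free_sas_node() on the
 * root unwinds the whole node -> port -> phy tree in one call.  The
 * _safe list iterators are required because each callee deletes the
 * very entry being visited.
 */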

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_dev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	sas_phy_free(hpsa_sas_phy->phy);
	kfree(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}
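
/*
 * Note the asymmetric unwind in hpsa_add_sas_host(): if
 * hpsa_sas_port_add_phy() fails, the phy was never linked into the
 * port's phy list, so the error path uses sas_phy_free()/kfree()
 * directly rather than hpsa_free_sas_phy(), which would try to delete
 * the phy from a port it was never added to.
 */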

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_rphy;

	return 0;

free_sas_rphy:
	sas_rphy_free(rphy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct Scsi_Host *shost = phy_to_shost(rphy);
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *sd;

	if (!shost)
		return -ENXIO;

	h = shost_to_hba(shost);

	if (!h)
		return -ENXIO;

	sd = hpsa_find_device_by_sas_rphy(h, rphy);
	if (!sd)
		return -ENXIO;

	*identifier = sd->eli;

	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
};
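
/*
 * Most of the callbacks above are stubs: the Smart Array firmware owns
 * the physical SAS topology, so per-phy management requests from the
 * transport class either succeed trivially (reset/enable/setup) or are
 * reported as unsupported (-ENXIO / -EINVAL).  Only
 * get_enclosure_identifier does real work, looking the device up by
 * rphy and returning its cached enclosure logical identifier (eli).
 */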

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}
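
/*
 * The ordering in hpsa_init() is deliberate: the SAS transport
 * template must exist before pci_register_driver(), because probe
 * (which may run synchronously during registration) attaches SCSI
 * hosts that reference the template.  On registration failure the
 * template is released so a failed module load unwinds cleanly.
 */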

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
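
/*
 * verify_offsets() is never called; it exists purely so that the
 * BUILD_BUG_ON() expansions are compiled.  Each VERIFY_OFFSET() pins a
 * field of a hardware-shared structure to its wire offset, e.g.
 *
 *	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, Tag) != 8);
 *
 * so inserting or reordering a field ahead of "Tag" becomes a compile
 * error instead of a silently corrupted command layout.
 */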

module_init(hpsa_init);
module_exit(hpsa_cleanup);