/*
 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/bitfield.h>

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

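/* Serializes use of the shared buffered-ioctl buffer across all adapters. */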
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore, 1);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};

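/*
 * Completion callback for FM API requests.  Runs from the request
 * completion path and wakes the thread sleeping in do_fm_api().
 */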
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}

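/* As above, but for transfers limited to the cached flash image header. */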
static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (mutex_lock_interruptible(&a->fm_api_mutex)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}
free_req:
	esas2r_free_request(a, rq);
free_sem:
	mutex_unlock(&a->fm_api_mutex);
}

static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}

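/*
 * Run a buffered ioctl: stage the caller's data in the shared DMA buffer,
 * issue the request through the supplied callback, wait for completion if
 * needed, then copy the (possibly updated) data back to the caller.
 */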
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev,
							   (size_t)esas2r_buffered_ioctl_size,
							   &esas2r_buffered_ioctl_addr,
							   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}

/* SMP ioctl support */
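/* Build and start an SMP passthrough request out of the shared buffer. */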
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}

/* CSMI ioctl support */
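/*
 * Completion callback for tunneled CSMI requests: pick up the target ID
 * and LUN chosen by the back end, then chain to the original callback.
 */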
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}

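/*
 * A LUN address is acceptable here only if every byte other than byte 1
 * (the LUN number itself) is zero, i.e. simple unit addressing.
 */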
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}

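/*
 * Dispatch a CSMI control code: some requests are satisfied locally from
 * adapter state, while passthrough-style codes are tunneled to the back
 * end.  Returns true only when a request was started and the caller must
 * wait for completion.
 */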
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}

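/*
 * Fill in the driver-supplied portions of CSMI results and copy the
 * final status back to the caller's structure.
 */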
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}

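/*
 * Completion callback for SCSI passthrough requests: translate the
 * internal request status into an ATTO_SPT_RS_* code and copy back the
 * SCSI status, sense length, and residual length.
 */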
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

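/*
 * Dispatch an ATTO HBA ioctl by function code.  Returns true when a
 * request was started and completion is asynchronous; false when the
 * ioctl was completed (or failed) inline.
 */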
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		if (pci_is_pcie(a->pcid)) {
			u16 stat;
			u32 caps;

			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
						  &stat);
			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
						   &caps);

			gai->pci.link_speed_curr =
				FIELD_GET(PCI_EXP_LNKSTA_CLS, stat);
			gai->pci.link_speed_max =
				FIELD_GET(PCI_EXP_LNKCAP_SLS, caps);
			gai->pci.link_width_curr =
				FIELD_GET(PCI_EXP_LNKSTA_NLW, stat);
			gai->pci.link_width_max =
				FIELD_GET(PCI_EXP_LNKCAP_MLW, caps);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc->contents,
				       a->fw_coredump_buff + offset,
				       len);
				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}

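/*
 * Fill in the driver-level portions of the result after a successful
 * HBA ioctl.
 */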
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}

u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}

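/*
 * Synchronously write the SAS NVRAM parameters.  Returns 1 on success
 * and 0 on failure.
 */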
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}
	return result;
}

/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
	if (IS_ERR(ioctl)) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %u, address %p",
			   cmd, arg);
		return PTR_ERR(ioctl);
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		if (ioctl->header.channel >= MAX_ADAPTERS ||
		    esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}

	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;

	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;

	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;

	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code =
			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code =
			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %u!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
			   err, cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}

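/* ioctl entry point; the adapter is stored in the SCSI host's hostdata. */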
int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}

static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
					      (size_t)length,
					      (dma_addr_t *)&a->firmware.phys,
					      GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}

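/*
 * Firmware images move through a small state machine shared by
 * esas2r_read_fw() and esas2r_write_fw(): a header written at offset 0
 * selects the action; upload-style commands are cached in FW_COMMAND_ST
 * and executed by the subsequent read, while downloads buffer the image
 * and leave the completion header behind in FW_STATUS_ST.
 */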
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */

				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */

		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}

/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */

		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */

		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if it's a version 1 flash image */

		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a write command, allocate memory because
		 * we have to cache everything.  otherwise, just cache
		 * the header, because the read op will do the command.
		 */

		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command, result will be picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */

			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d",
				     header->action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * the chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * there's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only way
			 * this can leak is if a root user writes a program
			 * that writes a shorter buffer than it claims, and the
			 * copyin fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}

1800 | /* Callback for the completion of a VDA request. */ |
1801 | static void vda_complete_req(struct esas2r_adapter *a, |
1802 | struct esas2r_request *rq) |
1803 | { |
1804 | a->vda_command_done = 1; |
1805 | wake_up_interruptible(&a->vda_waiter); |
1806 | } |
1807 | |
1808 | /* Scatter/gather callback for VDA requests */ |
1809 | static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) |
1810 | { |
1811 | struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; |
1812 | int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; |
1813 | |
1814 | (*addr) = a->ppvda_buffer + offset; |
1815 | return VDA_MAX_BUFFER_SIZE - offset; |
1816 | } |
1817 | |
1818 | /* Handle a call to read a VDA command. */ |
1819 | int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count) |
1820 | { |
1821 | if (!a->vda_buffer) |
1822 | return -ENOMEM; |
1823 | |
1824 | if (off == 0) { |
1825 | struct esas2r_request *rq; |
1826 | struct atto_ioctl_vda *vi = |
1827 | (struct atto_ioctl_vda *)a->vda_buffer; |
1828 | struct esas2r_sg_context sgc; |
1829 | bool wait_for_completion; |
1830 | |
1831 | /* |
1832 | * Presumeably, someone has already written to the vda_buffer, |
1833 | * and now they are reading the node the response, so now we |
1834 | * will actually issue the request to the chip and reply. |
1835 | */ |
1836 | |
1837 | /* allocate a request */ |
1838 | rq = esas2r_alloc_request(a); |
1839 | if (rq == NULL) { |
1840 | esas2r_debug("esas2r_read_vda: out of requests" ); |
1841 | return -EBUSY; |
1842 | } |
1843 | |
1844 | rq->comp_cb = vda_complete_req; |
1845 | |
1846 | sgc.first_req = rq; |
1847 | sgc.adapter = a; |
1848 | sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ; |
1849 | sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda; |
1850 | |
1851 | a->vda_command_done = 0; |
1852 | |
wait_for_completion =
esas2r_process_vda_ioctl(a, vi, rq, &sgc);
1855 | |
1856 | if (wait_for_completion) { |
/*
 * Now wait for it to complete; looping re-arms the wait if
 * a signal interrupts us before the done flag is set.
 */
1858 | |
1859 | while (!a->vda_command_done) |
1860 | wait_event_interruptible(a->vda_waiter, |
1861 | a->vda_command_done); |
1862 | } |
1863 | |
esas2r_free_request(a, rq);
1865 | } |
1866 | |
1867 | if (off > VDA_MAX_BUFFER_SIZE) |
1868 | return 0; |
1869 | |
1870 | if (count + off > VDA_MAX_BUFFER_SIZE) |
1871 | count = VDA_MAX_BUFFER_SIZE - off; |
1872 | |
1873 | if (count < 0) |
1874 | return 0; |
1875 | |
1876 | memcpy(buf, a->vda_buffer + off, count); |
1877 | |
1878 | return count; |
1879 | } |
1880 | |
1881 | /* Handle a call to write a VDA command. */ |
1882 | int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, |
1883 | int count) |
1884 | { |
1885 | /* |
1886 | * allocate memory for it, if not already done. once allocated, |
1887 | * we will keep it around until the driver is unloaded. |
1888 | */ |
1889 | |
1890 | if (!a->vda_buffer) { |
1891 | dma_addr_t dma_addr; |
a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
(size_t)VDA_MAX_BUFFER_SIZE,
&dma_addr,
GFP_KERNEL);
1897 | |
1898 | a->ppvda_buffer = dma_addr; |
1899 | } |
1900 | |
1901 | if (!a->vda_buffer) |
1902 | return -ENOMEM; |
1903 | |
1904 | if (off > VDA_MAX_BUFFER_SIZE) |
1905 | return 0; |
1906 | |
1907 | if (count + off > VDA_MAX_BUFFER_SIZE) |
1908 | count = VDA_MAX_BUFFER_SIZE - off; |
1909 | |
1910 | if (count < 1) |
1911 | return 0; |
1912 | |
1913 | memcpy(a->vda_buffer + off, buf, count); |
1914 | |
1915 | return count; |
1916 | } |
1917 | |
/* Callback for the completion of an FS_API request. */
1919 | static void fs_api_complete_req(struct esas2r_adapter *a, |
1920 | struct esas2r_request *rq) |
1921 | { |
1922 | a->fs_api_command_done = 1; |
1923 | |
1924 | wake_up_interruptible(&a->fs_api_waiter); |
1925 | } |
1926 | |
/* Scatter/gather callback for FS_API requests */
1928 | static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr) |
1929 | { |
1930 | struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; |
1931 | struct esas2r_ioctl_fs *fs = |
1932 | (struct esas2r_ioctl_fs *)a->fs_api_buffer; |
1933 | u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs; |
1934 | |
1935 | (*addr) = a->ppfs_api_buffer + offset; |
1936 | |
1937 | return a->fs_api_buffer_size - offset; |
1938 | } |
1939 | |
1940 | /* Handle a call to read firmware via FS_API. */ |
1941 | int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count) |
1942 | { |
1943 | if (!a->fs_api_buffer) |
1944 | return -ENOMEM; |
1945 | |
1946 | if (off == 0) { |
1947 | struct esas2r_request *rq; |
1948 | struct esas2r_sg_context sgc; |
1949 | struct esas2r_ioctl_fs *fs = |
1950 | (struct esas2r_ioctl_fs *)a->fs_api_buffer; |
1951 | |
1952 | /* If another flash request is already in progress, return. */ |
1953 | if (mutex_lock_interruptible(&a->fs_api_mutex)) { |
1954 | busy: |
1955 | fs->status = ATTO_STS_OUT_OF_RSRC; |
1956 | return -EBUSY; |
1957 | } |
1958 | |
1959 | /* |
1960 | * Presumeably, someone has already written to the |
1961 | * fs_api_buffer, and now they are reading the node the |
1962 | * response, so now we will actually issue the request to the |
1963 | * chip and reply. Allocate a request |
1964 | */ |
1965 | |
1966 | rq = esas2r_alloc_request(a); |
1967 | if (rq == NULL) { |
1968 | esas2r_debug("esas2r_read_fs: out of requests" ); |
1969 | mutex_unlock(lock: &a->fs_api_mutex); |
1970 | goto busy; |
1971 | } |
1972 | |
1973 | rq->comp_cb = fs_api_complete_req; |
1974 | |
/* Set up the SG context to build the s/g table */
1976 | |
1977 | sgc.cur_offset = fs->data; |
1978 | sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; |
1979 | |
1980 | a->fs_api_command_done = 0; |
1981 | |
1982 | if (!esas2r_process_fs_ioctl(a, fs, rq, sgc: &sgc)) { |
1983 | if (fs->status == ATTO_STS_OUT_OF_RSRC) |
1984 | count = -EBUSY; |
1985 | |
1986 | goto dont_wait; |
1987 | } |
1988 | |
/*
 * Now wait for it to complete; looping re-arms the wait if a
 * signal interrupts us before the done flag is set.
 */

while (!a->fs_api_command_done)
wait_event_interruptible(a->fs_api_waiter,
a->fs_api_command_done);

1995 | dont_wait: |
1996 | /* Free the request and keep going */ |
1997 | mutex_unlock(lock: &a->fs_api_mutex); |
1998 | esas2r_free_request(a, rq: (struct esas2r_request *)rq); |
1999 | |
2000 | /* Pick up possible error code from above */ |
2001 | if (count < 0) |
2002 | return count; |
2003 | } |
2004 | |
2005 | if (off > a->fs_api_buffer_size) |
2006 | return 0; |
2007 | |
2008 | if (count + off > a->fs_api_buffer_size) |
2009 | count = a->fs_api_buffer_size - off; |
2010 | |
2011 | if (count < 0) |
2012 | return 0; |
2013 | |
2014 | memcpy(buf, a->fs_api_buffer + off, count); |
2015 | |
2016 | return count; |
2017 | } |
2018 | |
2019 | /* Handle a call to write firmware via FS_API. */ |
2020 | int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, |
2021 | int count) |
2022 | { |
2023 | if (off == 0) { |
2024 | struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; |
2025 | u32 length = fs->command.length + offsetof( |
2026 | struct esas2r_ioctl_fs, |
2027 | data); |
2028 | |
2029 | /* |
2030 | * Special case, for BEGIN commands, the length field |
2031 | * is lying to us, so just get enough for the header. |
2032 | */ |
2033 | |
2034 | if (fs->command.command == ESAS2R_FS_CMD_BEGINW) |
2035 | length = offsetof(struct esas2r_ioctl_fs, data); |
2036 | |
2037 | /* |
2038 | * Beginning a command. We assume we'll get at least |
2039 | * enough in the first write so we can look at the |
2040 | * header and see how much we need to alloc. |
2041 | */ |
2042 | |
2043 | if (count < offsetof(struct esas2r_ioctl_fs, data)) |
2044 | return -EINVAL; |
2045 | |
2046 | /* Allocate a buffer or use the existing buffer. */ |
2047 | if (a->fs_api_buffer) { |
2048 | if (a->fs_api_buffer_size < length) { |
2049 | /* Free too-small buffer and get a new one */ |
dma_free_coherent(&a->pcid->dev,
(size_t)a->fs_api_buffer_size,
a->fs_api_buffer,
(dma_addr_t)a->ppfs_api_buffer);
2054 | |
2055 | goto re_allocate_buffer; |
2056 | } |
2057 | } else { |
2058 | re_allocate_buffer: |
2059 | a->fs_api_buffer_size = length; |
2060 | |
a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
(size_t)a->fs_api_buffer_size,
(dma_addr_t *)&a->ppfs_api_buffer,
GFP_KERNEL);
2065 | } |
2066 | } |
2067 | |
2068 | if (!a->fs_api_buffer) |
2069 | return -ENOMEM; |
2070 | |
2071 | if (off > a->fs_api_buffer_size) |
2072 | return 0; |
2073 | |
2074 | if (count + off > a->fs_api_buffer_size) |
2075 | count = a->fs_api_buffer_size - off; |
2076 | |
2077 | if (count < 1) |
2078 | return 0; |
2079 | |
2080 | memcpy(a->fs_api_buffer + off, buf, count); |
2081 | |
2082 | return count; |
2083 | } |
2084 | |