// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *   commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE
/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to allow
		 * cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 * open_getadapter_fib - Get the next fib
 * @dev: adapter being processed
 * @arg: arguments to the open call
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the mutex used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

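/*
 * 32-bit layout of struct fib_ioctl, used when a 32-bit process issues
 * the ioctl on a 64-bit kernel; the fib pointer travels as a
 * compat_uptr_t and is widened with compat_ptr() below.
 */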
struct compat_fib_ioctl {
	u32 fibctx;
	s32 wait;
	compat_uptr_t fib;
};

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (in_compat_syscall()) {
		struct compat_fib_ioctl cf;

		if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
			return -EFAULT;

		f.fibctx = cf.fibctx;
		f.wait = cf.wait;
		f.fib = compat_ptr(cf.fib);
	} else {
		if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
			return -EFAULT;
	}
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

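/**
 * aac_close_fib_context - tear down a user fib context
 * @dev: adapter
 * @fibctx: fib context to close
 *
 * Frees any fibs still queued on the context, unlinks the context from
 * the adapter's AdapterFibContext list and releases its memory. Callers
 * in this file take dev->fib_lock around this routine.
 */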
int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision - report the driver version
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */

449 | |
450 | static int check_revision(struct aac_dev *dev, void __user *arg) |
451 | { |
452 | struct revision response; |
453 | char *driver_version = aac_driver_version; |
454 | u32 version; |
455 | |
456 | response.compat = 1; |
457 | version = (simple_strtol(driver_version, |
458 | &driver_version, 10) << 24) | 0x00000400; |
459 | version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; |
460 | version += simple_strtol(driver_version + 1, NULL, 10); |
461 | response.version = cpu_to_le32(version); |
462 | # ifdef AAC_DRIVER_BUILD |
463 | response.build = cpu_to_le32(AAC_DRIVER_BUILD); |
464 | # else |
465 | response.build = cpu_to_le32(9999); |
466 | # endif |
467 | |
468 | if (copy_to_user(to: arg, from: &response, n: sizeof(response))) |
469 | return -EFAULT; |
470 | return 0; |
471 | } |

/**
 * aac_send_raw_srb() - send a raw SRB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the send call
 *
 * This routine sends a user-supplied SRB (SCSI request block) to the
 * adapter on behalf of a user level program.
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;


	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = memdup_user(user_srb, fibsize);
	if (IS_ERR(user_srbcmd)) {
		rcode = PTR_ERR(user_srbcmd);
		user_srbcmd = NULL;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
			"Raw SRB command calculated fibsize=%lu;%lu "
			"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
			"issued fibsize=%d\n",
			actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
			sizeof(struct aac_srb), sizeof(struct sgentry),
			sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
	    dev->hba_map[chn][user_srbcmd->id].devtype ==
	    AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */

		/* iu_type is a parameter of aac_hba_send */
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
			cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *) fib_data(srbfib);

		// Fix up srb for endian and force some values

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
		srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; // Obsolete parameter
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
				      NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;
			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize,
				      FsaNormal, 1, 1, NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      usg->sg[i].count,
						      data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize,
				      FsaNormal, 1, 1, NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;

			}
		}
	}

	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
			       AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *) fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

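/* PCI bus/slot location returned to user space by FSACTL_GET_PCI_INFO */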
struct aac_pci_info {
	u32 bus;
	u32 slot;
};

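/**
 * aac_get_pci_info - report the adapter's PCI location
 * @dev: adapter
 * @arg: user buffer to receive a struct aac_pci_info
 *
 * Copies the adapter's PCI bus number and slot back to the caller.
 */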
static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

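/**
 * aac_get_hba_info - report adapter identification
 * @dev: adapter
 * @arg: user buffer to receive a struct aac_hba_info
 *
 * Fills in the adapter number, PCI location and PCI IDs of the
 * controller and copies them back to the caller.
 */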
static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number = (u8) dev->id;
	hbainfo.system_io_bus_number = dev->pdev->bus->number;
	hbainfo.device_number = (dev->pdev->devfn >> 3);
	hbainfo.function_number = (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id = dev->pdev->vendor;
	hbainfo.device_id = dev->pdev->device;
	hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id = dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

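/* Argument block passed in by user space for FSACTL_RESET_IOP */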
struct aac_reset_iop {
	u8 reset_type;
};

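/**
 * aac_send_reset_adapter - reset the adapter on user request
 * @dev: adapter
 * @arg: user copy of a struct aac_reset_iop carrying the reset type
 *
 * Marks the adapter as shut down and hands the requested reset type to
 * aac_reset_adapter(). The ioctl mutex is dropped across the reset call.
 */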
static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

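/**
 * aac_do_ioctl - dispatch an aacraid ioctl
 * @dev: adapter
 * @cmd: ioctl command code
 * @arg: ioctl arguments
 *
 * Serializes ioctls through dev->ioctl_mutex, gives the HBA-specific
 * handler first crack at the command, then dispatches to the routines
 * above. Returns -ENOTTY for unrecognized commands.
 */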
int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}