// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

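/*
 * Tag pool: a simple LIFO stack of free tags. tag_init() fills the stack
 * in descending order so tag 0 sits on top and is handed out first,
 * tag_get_one() pops from the top, and tag_release_one() pushes a freed
 * tag back.
 */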
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	if (st->top == 0)
		return true;
	else
		return false;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
						    &res->bus_addr,
						    GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl - Prepares SGL
 * @mhba: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_p: SGL to be filled in
 * @sg_count: return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			     scmd->sc_data_direction);
		return -1;
	}
	scsi_for_each_sg(scmd, sg, *sg_count, i) {
		busaddr = sg_dma_address(sg);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
						unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
				       GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for FW frame, size = %d.\n",
			mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev,
				"failed to allocate memory for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					  cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
								phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd - Get a command from the free pool
 * @mhba: Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd - Return a cmd to free command pool
 * @mhba: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds - Free all the cmds in the free cmd pool
 * @mhba: Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds - Allocates the command packets
 * @mhba: Adapter soft state
 *
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
		"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

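/*
 * Inbound/outbound slot pointers pack two fields into one value: the
 * slot index (regs->cl_slot_num_mask) and a toggle bit
 * (regs->cl_pointer_toggle) that flips each time the ring wraps.  Equal
 * slot indexes with different toggle bits therefore mean the ring is
 * completely full, not empty.
 */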
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev,
			"request ID from FW:0x%x, cmd request ID:0x%x\n",
			request_id, mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}

static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

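/*
 * Drain the outbound (response) list: copy each finished frame into a
 * preallocated mvumi_ob_data buffer queued on free_ob_list, then hand
 * the slot back to the firmware by advancing outb_read_pointer.  If the
 * ob_data pool runs dry, the walk stops and the current slot is rewound
 * so the remaining frames are picked up on the next interrupt.
 */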
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
			scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

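/*
 * Fire an internal command and sleep until the completion path
 * (mvumi_complete_internal_cmd) flips cmd_status and wakes
 * int_cmd_wait_q, or until MVUMI_INTERNAL_CMD_WAIT_TIME seconds pass.
 * On timeout the tag and the firmware-outstanding count are reclaimed
 * under the host lock.
 */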
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was never sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev,
					"failed to get memory for internal flush cache cmd for device %d",
					device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}

static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
				unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}

static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	u64 time;
	u64 local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3; /* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08; /* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		time = ktime_get_real_seconds();
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

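/*
 * The uncached DMA region set up below is carved into consecutive,
 * aligned pieces:
 *
 *   ib_list (128-byte aligned) [+ per-cmd frames in dynamic-source mode]
 *   ib shadow (8-byte aligned, two u32s)
 *   ob shadow (8 bytes on 9580, 4 bytes otherwise)
 *   ob_list (128-byte aligned)
 *
 * Separate cached allocations provide the ob data pool, the tag stack,
 * the tag_cmd table and the target bitmap.
 */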
/**
 * mvumi_init_data - Initialize requested data for FW
 * @mhba: Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
		sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
		(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
			hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

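/*
 * Handshake state machine, driven by mvumi_handshake() one step at a
 * time: HS_S_START -> HS_S_RESET -> HS_S_PAGE_ADDR ->
 * (HS_S_QUERY_PAGE / HS_S_SEND_PAGE per page) -> HS_S_END, after which
 * the communication-list interrupts are unmasked and fw_state becomes
 * FW_STATE_STARTED.
 */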
/**
 * mvumi_handshake - Move the FW to READY state
 * @mhba: Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in an operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
				mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
				mhba->global_isr, isr_status);
			return -1;
		}
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
				"handshake failed at state 0x%x.\n",
				mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}

static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}

/**
 * mvumi_complete_cmd - Completes a command
 * @mhba: Adapter soft state
 * @cmd: Command to be completed
 * @ob_frame: Command response
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	mvumi_priv(cmd->scmd)->cmd_priv = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
		}
		break;
	default:
		scmd->result |= (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd))
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
				cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}

static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
			ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
			ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");
	}
}

static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
{
	struct scsi_device *sdev;
	int ret = -1;

	if (status == DEVICE_OFFLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (sdev) {
			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
								sdev->id, 0);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			ret = 0;
		} else
			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
									devid);
	} else if (status == DEVICE_ONLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (!sdev) {
			scsi_add_device(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
								devid, 0);
			ret = 0;
		} else {
			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
								0, devid, 0);
			scsi_device_put(sdev);
		}
	}
	return ret;
}

static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;
		else
			return 0;
	} else {
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}

static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}

static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
	if (sdev) {
		scsi_rescan_device(sdev);
		scsi_device_put(sdev);
	}
}

static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
{
	struct mvumi_device *mv_dev = NULL;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid == wwid) {
			if (mv_dev->id != id) {
				dev_err(&mhba->pdev->dev,
					"%s has same wwid[%llx], but different id[%d %d]\n",
					__func__, mv_dev->wwid, mv_dev->id, id);
				return -1;
			} else {
				if (mhba->pdev->device ==
						PCI_DEVICE_ID_MARVELL_MV9143)
					mvumi_rescan_devices(mhba, id);
				return 1;
			}
		}
	}
	return 0;
}

static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;

	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		if (mv_dev->id == id) {
			dev_dbg(&mhba->pdev->dev,
				"detach device(0:%d:0) wwid(%llx) from HOST\n",
				mv_dev->id, mv_dev->wwid);
			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
			list_del_init(&mv_dev->list);
			kfree(mv_dev);
		}
	}
}

static int mvumi_probe_devices(struct mvumi_hba *mhba)
{
	int id, maxid;
	u64 wwid = 0;
	struct mvumi_device *mv_dev = NULL;
	struct mvumi_cmd *cmd = NULL;
	int found = 0;

	cmd = mvumi_create_internal_cmd(mhba, 64);
	if (!cmd)
		return -1;

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		maxid = mhba->max_target_id;
	else
		maxid = mhba->max_target_id - 1;

	for (id = 0; id < maxid; id++) {
		wwid = mvumi_inquiry(mhba, id, cmd);
		if (!wwid) {
			/* device no response, remove it */
			mvumi_remove_devices(mhba, id);
		} else {
			/* device response, add it */
			found = mvumi_match_devices(mhba, id, wwid);
			if (!found) {
				mvumi_remove_devices(mhba, id);
				mv_dev = kzalloc(sizeof(struct mvumi_device),
								GFP_KERNEL);
				if (!mv_dev) {
					dev_err(&mhba->pdev->dev,
						"%s alloc mv_dev failed\n",
						__func__);
					continue;
				}
				mv_dev->id = id;
				mv_dev->wwid = wwid;
				mv_dev->sdev = NULL;
				INIT_LIST_HEAD(&mv_dev->list);
				list_add_tail(&mv_dev->list,
					      &mhba->mhba_dev_list);
				dev_dbg(&mhba->pdev->dev,
					"probe a new device(0:%d:0) wwid(%llx)\n",
					id, mv_dev->wwid);
			} else if (found == -1)
				return -1;
			else
				continue;
		}
	}

	if (cmd)
		mvumi_delete_internal_cmd(mhba, cmd);

	return 0;
}

static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL, *dev_next;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			list_for_each_entry_safe(mv_dev, dev_next,
						 &mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed, wwid(%llx) already exists\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}

static void mvumi_proc_msg(struct mvumi_hba *mhba,
					struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}
1676 | |
1677 | static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) |
1678 | { |
1679 | if (msg == APICDB1_EVENT_GETEVENT) { |
1680 | int i, count; |
1681 | struct mvumi_driver_event *param = NULL; |
1682 | struct mvumi_event_req *er = buffer; |
1683 | count = er->count; |
1684 | if (count > MAX_EVENTS_RETURNED) { |
1685 | dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" |
1686 | " than max event count[0x%x].\n" , |
1687 | count, MAX_EVENTS_RETURNED); |
1688 | return; |
1689 | } |
1690 | for (i = 0; i < count; i++) { |
1691 | param = &er->events[i]; |
mvumi_show_event(mhba, param);
1693 | } |
1694 | } else if (msg == APICDB1_HOST_GETEVENT) { |
mvumi_proc_msg(mhba, buffer);
1696 | } |
1697 | } |
1698 | |
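/*
* Fetch pending events from the firmware: build an internal
* APICDB0_EVENT command with a 512 byte data-in buffer, issue it
* synchronously and, on SAM_STAT_GOOD, hand the returned buffer to
* mvumi_notification() for decoding.
*/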
1699 | static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) |
1700 | { |
1701 | struct mvumi_cmd *cmd; |
1702 | struct mvumi_msg_frame *frame; |
1703 | |
cmd = mvumi_create_internal_cmd(mhba, 512);
1705 | if (!cmd) |
1706 | return -1; |
1707 | cmd->scmd = NULL; |
1708 | cmd->cmd_status = REQ_STATUS_PENDING; |
atomic_set(&cmd->sync_cmd, 0);
1710 | frame = cmd->frame; |
1711 | frame->device_id = 0; |
1712 | frame->cmd_flag = CMD_FLAG_DATA_IN; |
1713 | frame->req_function = CL_FUN_SCSI_CMD; |
1714 | frame->cdb_length = MAX_COMMAND_SIZE; |
1715 | frame->data_transfer_length = sizeof(struct mvumi_event_req); |
1716 | memset(frame->cdb, 0, MAX_COMMAND_SIZE); |
1717 | frame->cdb[0] = APICDB0_EVENT; |
1718 | frame->cdb[1] = msg; |
1719 | mvumi_issue_blocked_cmd(mhba, cmd); |
1720 | |
1721 | if (cmd->cmd_status != SAM_STAT_GOOD) |
1722 | dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n" , |
1723 | cmd->cmd_status); |
1724 | else |
1725 | mvumi_notification(mhba, msg: cmd->frame->cdb[1], buffer: cmd->data_buf); |
1726 | |
1727 | mvumi_delete_internal_cmd(mhba, cmd); |
1728 | return 0; |
1729 | } |
1730 | |
1731 | static void mvumi_scan_events(struct work_struct *work) |
1732 | { |
1733 | struct mvumi_events_wq *mu_ev = |
1734 | container_of(work, struct mvumi_events_wq, work_q); |
1735 | |
mvumi_get_event(mu_ev->mhba, mu_ev->event);
kfree(mu_ev);
1738 | } |
1739 | |
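/*
* Fan out doorbell events from interrupt context: a bus-change
* doorbell only needs to wake the rescan thread, while an event-notify
* doorbell requires a firmware round trip and is therefore deferred to
* a work item (allocated with GFP_ATOMIC, as this runs under the host
* lock).
*/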
1740 | static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) |
1741 | { |
1742 | struct mvumi_events_wq *mu_ev; |
1743 | |
1744 | while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { |
1745 | if (isr_status & DRBL_BUS_CHANGE) { |
atomic_inc(&mhba->pnp_count);
wake_up_process(mhba->dm_thread);
1748 | isr_status &= ~(DRBL_BUS_CHANGE); |
1749 | continue; |
1750 | } |
1751 | |
mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1753 | if (mu_ev) { |
1754 | INIT_WORK(&mu_ev->work_q, mvumi_scan_events); |
1755 | mu_ev->mhba = mhba; |
1756 | mu_ev->event = APICDB1_EVENT_GETEVENT; |
1757 | isr_status &= ~(DRBL_EVENT_NOTIFY); |
1758 | mu_ev->param = NULL; |
schedule_work(&mu_ev->work_q);
1760 | } |
1761 | } |
1762 | } |
1763 | |
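/*
* Drain the completed-response list: each outbound frame is matched to
* its owning command via the frame tag, the tag is returned to the
* pool, and the command is completed back to either the midlayer or an
* internal-command waiter before further requests are fired.
*/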
1764 | static void mvumi_handle_clob(struct mvumi_hba *mhba) |
1765 | { |
1766 | struct mvumi_rsp_frame *ob_frame; |
1767 | struct mvumi_cmd *cmd; |
1768 | struct mvumi_ob_data *pool; |
1769 | |
while (!list_empty(&mhba->free_ob_list)) {
pool = list_first_entry(&mhba->free_ob_list,
struct mvumi_ob_data, list);
list_del_init(&pool->list);
list_add_tail(&pool->list, &mhba->ob_data_list);
1775 | |
1776 | ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; |
1777 | cmd = mhba->tag_cmd[ob_frame->tag]; |
1778 | |
atomic_dec(&mhba->fw_outstanding);
mhba->tag_cmd[ob_frame->tag] = NULL;
tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1782 | if (cmd->scmd) |
1783 | mvumi_complete_cmd(mhba, cmd, ob_frame); |
1784 | else |
1785 | mvumi_complete_internal_cmd(mhba, cmd, ob_frame); |
1786 | } |
1787 | mhba->instancet->fire_cmd(mhba, NULL); |
1788 | } |
1789 | |
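/*
* Top-level interrupt handler. clear_intr() snapshots and acknowledges
* the hardware causes into mhba->global_isr / mhba->isr_status; a
* non-zero return (or an empty snapshot) means the interrupt was not
* ours and IRQ_NONE is returned.
*/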
1790 | static irqreturn_t mvumi_isr_handler(int irq, void *devp) |
1791 | { |
1792 | struct mvumi_hba *mhba = (struct mvumi_hba *) devp; |
1793 | unsigned long flags; |
1794 | |
1795 | spin_lock_irqsave(mhba->shost->host_lock, flags); |
1796 | if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { |
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1798 | return IRQ_NONE; |
1799 | } |
1800 | |
1801 | if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { |
1802 | if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) |
mvumi_launch_events(mhba, mhba->isr_status);
if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1806 | mvumi_handshake(mhba); |
1807 | } |
1808 | |
1809 | } |
1810 | |
1811 | if (mhba->global_isr & mhba->regs->int_comaout) |
1812 | mvumi_receive_ob_list_entry(mhba); |
1813 | |
1814 | mhba->global_isr = 0; |
1815 | mhba->isr_status = 0; |
1816 | if (mhba->fw_state == FW_STATE_STARTED) |
1817 | mvumi_handle_clob(mhba); |
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1819 | return IRQ_HANDLED; |
1820 | } |
1821 | |
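/*
* Place one request into an inbound list slot. Frames are variable
* sized: for example, a command with two scatter-gather entries
* occupies sizeof(struct mvumi_msg_frame) +
* 2 * sizeof(struct mvumi_sgl) bytes. Controllers advertising
* HS_CAPABILITY_SUPPORT_DYN_SRC skip the copy and instead queue a
* descriptor holding the frame's DMA address, with the frame length
* encoded in 32-bit words.
*/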
1822 | static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, |
1823 | struct mvumi_cmd *cmd) |
1824 | { |
1825 | void *ib_entry; |
1826 | struct mvumi_msg_frame *ib_frame; |
1827 | unsigned int frame_len; |
1828 | |
1829 | ib_frame = cmd->frame; |
1830 | if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { |
1831 | dev_dbg(&mhba->pdev->dev, "firmware not ready.\n" ); |
1832 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; |
1833 | } |
if (tag_is_empty(&mhba->tag_pool)) {
dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1836 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; |
1837 | } |
mvumi_get_ib_list_entry(mhba, &ib_entry);
1839 | |
cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1841 | cmd->frame->request_id = mhba->io_seq++; |
1842 | cmd->request_id = cmd->frame->request_id; |
1843 | mhba->tag_cmd[cmd->frame->tag] = cmd; |
1844 | frame_len = sizeof(*ib_frame) + |
1845 | ib_frame->sg_counts * sizeof(struct mvumi_sgl); |
1846 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { |
1847 | struct mvumi_dyn_list_entry *dle; |
1848 | dle = ib_entry; |
1849 | dle->src_low_addr = |
1850 | cpu_to_le32(lower_32_bits(cmd->frame_phys)); |
1851 | dle->src_high_addr = |
1852 | cpu_to_le32(upper_32_bits(cmd->frame_phys)); |
1853 | dle->if_length = (frame_len >> 2) & 0xFFF; |
1854 | } else { |
1855 | memcpy(ib_entry, ib_frame, frame_len); |
1856 | } |
1857 | return MV_QUEUE_COMMAND_RESULT_SENT; |
1858 | } |
1859 | |
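/*
* Queue @cmd (if any) on the waiting list and push as many waiting
* requests as the inbound list has free slots; the hardware is kicked
* only once per batch, via mvumi_send_ib_list_entry().
*/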
1860 | static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) |
1861 | { |
1862 | unsigned short num_of_cl_sent = 0; |
1863 | unsigned int count; |
1864 | enum mvumi_qc_result result; |
1865 | |
1866 | if (cmd) |
list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
count = mhba->instancet->check_ib_list(mhba);
if (list_empty(&mhba->waiting_req_list) || !count)
1870 | return; |
1871 | |
1872 | do { |
1873 | cmd = list_first_entry(&mhba->waiting_req_list, |
1874 | struct mvumi_cmd, queue_pointer); |
list_del_init(&cmd->queue_pointer);
1876 | result = mvumi_send_command(mhba, cmd); |
1877 | switch (result) { |
1878 | case MV_QUEUE_COMMAND_RESULT_SENT: |
1879 | num_of_cl_sent++; |
1880 | break; |
1881 | case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: |
list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1883 | if (num_of_cl_sent > 0) |
1884 | mvumi_send_ib_list_entry(mhba); |
1885 | |
1886 | return; |
1887 | } |
} while (!list_empty(&mhba->waiting_req_list) && count--);
1889 | |
1890 | if (num_of_cl_sent > 0) |
1891 | mvumi_send_ib_list_entry(mhba); |
1892 | } |
1893 | |
1894 | /** |
1895 | * mvumi_enable_intr - Enables interrupts |
1896 | * @mhba: Adapter soft state |
1897 | */ |
1898 | static void mvumi_enable_intr(struct mvumi_hba *mhba) |
1899 | { |
1900 | unsigned int mask; |
1901 | struct mvumi_hw_regs *regs = mhba->regs; |
1902 | |
1903 | iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); |
1904 | mask = ioread32(regs->enpointa_mask_reg); |
1905 | mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; |
1906 | iowrite32(mask, regs->enpointa_mask_reg); |
1907 | } |
1908 | |
1909 | /** |
* mvumi_disable_intr - Disables interrupts
1911 | * @mhba: Adapter soft state |
1912 | */ |
1913 | static void mvumi_disable_intr(struct mvumi_hba *mhba) |
1914 | { |
1915 | unsigned int mask; |
1916 | struct mvumi_hw_regs *regs = mhba->regs; |
1917 | |
1918 | iowrite32(0, regs->arm_to_pciea_mask_reg); |
1919 | mask = ioread32(regs->enpointa_mask_reg); |
1920 | mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | |
1921 | regs->int_comaerr); |
1922 | iowrite32(mask, regs->enpointa_mask_reg); |
1923 | } |
1924 | |
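/*
* Read and acknowledge the interrupt causes. Returns 1 when the main
* cause register shows nothing for the message unit or reads as all
* ones (typically a surprise-removed device), otherwise stashes the
* causes in mhba->global_isr / mhba->isr_status and returns 0.
*/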
1925 | static int mvumi_clear_intr(void *extend) |
1926 | { |
1927 | struct mvumi_hba *mhba = (struct mvumi_hba *) extend; |
1928 | unsigned int status, isr_status = 0, tmp = 0; |
1929 | struct mvumi_hw_regs *regs = mhba->regs; |
1930 | |
1931 | status = ioread32(regs->main_int_cause_reg); |
1932 | if (!(status & regs->int_mu) || status == 0xFFFFFFFF) |
1933 | return 1; |
1934 | if (unlikely(status & regs->int_comaerr)) { |
1935 | tmp = ioread32(regs->outb_isr_cause); |
1936 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { |
1937 | if (tmp & regs->clic_out_err) { |
1938 | iowrite32(tmp & regs->clic_out_err, |
1939 | regs->outb_isr_cause); |
1940 | } |
1941 | } else { |
1942 | if (tmp & (regs->clic_in_err | regs->clic_out_err)) |
1943 | iowrite32(tmp & (regs->clic_in_err | |
1944 | regs->clic_out_err), |
1945 | regs->outb_isr_cause); |
1946 | } |
1947 | status ^= mhba->regs->int_comaerr; |
1948 | /* inbound or outbound parity error, command will timeout */ |
1949 | } |
1950 | if (status & regs->int_comaout) { |
1951 | tmp = ioread32(regs->outb_isr_cause); |
1952 | if (tmp & regs->clic_irq) |
1953 | iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); |
1954 | } |
1955 | if (status & regs->int_dl_cpu2pciea) { |
1956 | isr_status = ioread32(regs->arm_to_pciea_drbl_reg); |
1957 | if (isr_status) |
1958 | iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); |
1959 | } |
1960 | |
1961 | mhba->global_isr = status; |
1962 | mhba->isr_status = isr_status; |
1963 | |
1964 | return 0; |
1965 | } |
1966 | |
1967 | /** |
1968 | * mvumi_read_fw_status_reg - returns the current FW status value |
1969 | * @mhba: Adapter soft state |
1970 | */ |
1971 | static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) |
1972 | { |
1973 | unsigned int status; |
1974 | |
1975 | status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); |
1976 | if (status) |
1977 | iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); |
1978 | return status; |
1979 | } |
1980 | |
1981 | static struct mvumi_instance_template mvumi_instance_9143 = { |
1982 | .fire_cmd = mvumi_fire_cmd, |
1983 | .enable_intr = mvumi_enable_intr, |
1984 | .disable_intr = mvumi_disable_intr, |
1985 | .clear_intr = mvumi_clear_intr, |
1986 | .read_fw_status_reg = mvumi_read_fw_status_reg, |
1987 | .check_ib_list = mvumi_check_ib_list_9143, |
1988 | .check_ob_list = mvumi_check_ob_list_9143, |
1989 | .reset_host = mvumi_reset_host_9143, |
1990 | }; |
1991 | |
1992 | static struct mvumi_instance_template mvumi_instance_9580 = { |
1993 | .fire_cmd = mvumi_fire_cmd, |
1994 | .enable_intr = mvumi_enable_intr, |
1995 | .disable_intr = mvumi_disable_intr, |
1996 | .clear_intr = mvumi_clear_intr, |
1997 | .read_fw_status_reg = mvumi_read_fw_status_reg, |
1998 | .check_ib_list = mvumi_check_ib_list_9580, |
1999 | .check_ob_list = mvumi_check_ob_list_9580, |
2000 | .reset_host = mvumi_reset_host_9580, |
2001 | }; |
2002 | |
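/*
* Record the configured target in the byte-wide target_map bitmask so
* the driver can tell which ids the midlayer knows about; ids at or
* beyond max_target_id are rejected.
*/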
2003 | static int mvumi_slave_configure(struct scsi_device *sdev) |
2004 | { |
2005 | struct mvumi_hba *mhba; |
2006 | unsigned char bitcount = sizeof(unsigned char) * 8; |
2007 | |
2008 | mhba = (struct mvumi_hba *) sdev->host->hostdata; |
2009 | if (sdev->id >= mhba->max_target_id) |
2010 | return -EINVAL; |
2011 | |
2012 | mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); |
2013 | return 0; |
2014 | } |
2015 | |
2016 | /** |
2017 | * mvumi_build_frame - Prepares a direct cdb (DCDB) command |
2018 | * @mhba: Adapter soft state |
2019 | * @scmd: SCSI command |
2020 | * @cmd: Command to be prepared in |
2021 | * |
* This function prepares CDB commands. These are typically pass-through
2023 | * commands to the devices. |
2024 | */ |
2025 | static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, |
2026 | struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) |
2027 | { |
2028 | struct mvumi_msg_frame *pframe; |
2029 | |
2030 | cmd->scmd = scmd; |
2031 | cmd->cmd_status = REQ_STATUS_PENDING; |
2032 | pframe = cmd->frame; |
2033 | pframe->device_id = ((unsigned short) scmd->device->id) | |
2034 | (((unsigned short) scmd->device->lun) << 8); |
2035 | pframe->cmd_flag = 0; |
2036 | |
2037 | switch (scmd->sc_data_direction) { |
2038 | case DMA_NONE: |
2039 | pframe->cmd_flag |= CMD_FLAG_NON_DATA; |
2040 | break; |
2041 | case DMA_FROM_DEVICE: |
2042 | pframe->cmd_flag |= CMD_FLAG_DATA_IN; |
2043 | break; |
2044 | case DMA_TO_DEVICE: |
2045 | pframe->cmd_flag |= CMD_FLAG_DATA_OUT; |
2046 | break; |
2047 | case DMA_BIDIRECTIONAL: |
2048 | default: |
dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2051 | goto error; |
2052 | } |
2053 | |
2054 | pframe->cdb_length = scmd->cmd_len; |
2055 | memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); |
2056 | pframe->req_function = CL_FUN_SCSI_CMD; |
if (scsi_bufflen(scmd)) {
if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
&pframe->sg_counts))
goto error;

pframe->data_transfer_length = scsi_bufflen(scmd);
2063 | } else { |
2064 | pframe->sg_counts = 0; |
2065 | pframe->data_transfer_length = 0; |
2066 | } |
2067 | return 0; |
2068 | |
2069 | error: |
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
2071 | return -1; |
2072 | } |
2073 | |
2074 | /** |
2075 | * mvumi_queue_command - Queue entry point |
2076 | * @shost: Scsi host to queue command on |
2077 | * @scmd: SCSI command to be queued |
2078 | */ |
2079 | static int mvumi_queue_command(struct Scsi_Host *shost, |
2080 | struct scsi_cmnd *scmd) |
2081 | { |
2082 | struct mvumi_cmd *cmd; |
2083 | struct mvumi_hba *mhba; |
2084 | unsigned long irq_flags; |
2085 | |
2086 | spin_lock_irqsave(shost->host_lock, irq_flags); |
2087 | |
2088 | mhba = (struct mvumi_hba *) shost->hostdata; |
2089 | scmd->result = 0; |
2090 | cmd = mvumi_get_cmd(mhba); |
2091 | if (unlikely(!cmd)) { |
spin_unlock_irqrestore(shost->host_lock, irq_flags);
2093 | return SCSI_MLQUEUE_HOST_BUSY; |
2094 | } |
2095 | |
2096 | if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) |
2097 | goto out_return_cmd; |
2098 | |
2099 | cmd->scmd = scmd; |
mvumi_priv(scmd)->cmd_priv = cmd;
2101 | mhba->instancet->fire_cmd(mhba, cmd); |
spin_unlock_irqrestore(shost->host_lock, irq_flags);
2103 | return 0; |
2104 | |
2105 | out_return_cmd: |
2106 | mvumi_return_cmd(mhba, cmd); |
scsi_done(scmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
2109 | return 0; |
2110 | } |
2111 | |
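/*
* Timeout handler: reclaim the frame tag, unlink the command from the
* waiting list (or drop the firmware-outstanding count if it was
* already sent), unmap its data buffer and fail it with DID_ABORT
* before handing it back to the midlayer's timeout machinery.
*/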
2112 | static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd) |
2113 | { |
struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv;
struct Scsi_Host *host = scmd->device->host;
struct mvumi_hba *mhba = shost_priv(host);
2117 | unsigned long flags; |
2118 | |
2119 | spin_lock_irqsave(mhba->shost->host_lock, flags); |
2120 | |
2121 | if (mhba->tag_cmd[cmd->frame->tag]) { |
2122 | mhba->tag_cmd[cmd->frame->tag] = NULL; |
tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2124 | } |
if (!list_empty(&cmd->queue_pointer))
list_del_init(&cmd->queue_pointer);
else
atomic_dec(&mhba->fw_outstanding);
2129 | |
2130 | scmd->result = (DID_ABORT << 16); |
mvumi_priv(scmd)->cmd_priv = NULL;
if (scsi_bufflen(scmd)) {
2133 | dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), |
2134 | scsi_sg_count(scmd), |
2135 | scmd->sc_data_direction); |
2136 | } |
2137 | mvumi_return_cmd(mhba, cmd); |
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2139 | |
2140 | return SCSI_EH_NOT_HANDLED; |
2141 | } |
2142 | |
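/*
* Fake a legacy CHS geometry for the BIOS: 64 heads / 32 sectors for
* capacities below 0x200000 sectors (1 GiB at 512 bytes per sector),
* 255 heads / 63 sectors above that, with the cylinder count derived
* from the capacity in either case.
*/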
2143 | static int |
2144 | mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, |
2145 | sector_t capacity, int geom[]) |
2146 | { |
2147 | int heads, sectors; |
2148 | sector_t cylinders; |
2149 | unsigned long tmp; |
2150 | |
2151 | heads = 64; |
2152 | sectors = 32; |
2153 | tmp = heads * sectors; |
2154 | cylinders = capacity; |
2155 | sector_div(cylinders, tmp); |
2156 | |
2157 | if (capacity >= 0x200000) { |
2158 | heads = 255; |
2159 | sectors = 63; |
2160 | tmp = heads * sectors; |
2161 | cylinders = capacity; |
2162 | sector_div(cylinders, tmp); |
2163 | } |
2164 | geom[0] = heads; |
2165 | geom[1] = sectors; |
2166 | geom[2] = cylinders; |
2167 | |
2168 | return 0; |
2169 | } |
2170 | |
2171 | static const struct scsi_host_template mvumi_template = { |
2172 | |
2173 | .module = THIS_MODULE, |
.name = "Marvell Storage Controller",
2175 | .slave_configure = mvumi_slave_configure, |
2176 | .queuecommand = mvumi_queue_command, |
2177 | .eh_timed_out = mvumi_timed_out, |
2178 | .eh_host_reset_handler = mvumi_host_reset, |
2179 | .bios_param = mvumi_bios_param, |
2180 | .dma_boundary = PAGE_SIZE - 1, |
2181 | .this_id = -1, |
2182 | .cmd_size = sizeof(struct mvumi_cmd_priv), |
2183 | }; |
2184 | |
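/*
* Fill mhba->regs with the per-ASIC register layout: the 9143 exposes
* its registers through BAR0 while the 9580 uses BAR2, and the two
* chips differ in doorbell/message-unit offsets as well as in which
* bits of the main cause register signal list DMA and error events.
*/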
2185 | static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) |
2186 | { |
2187 | void *base = NULL; |
2188 | struct mvumi_hw_regs *regs; |
2189 | |
2190 | switch (mhba->pdev->device) { |
2191 | case PCI_DEVICE_ID_MARVELL_MV9143: |
2192 | mhba->mmio = mhba->base_addr[0]; |
2193 | base = mhba->mmio; |
2194 | if (!mhba->regs) { |
mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2196 | if (mhba->regs == NULL) |
2197 | return -ENOMEM; |
2198 | } |
2199 | regs = mhba->regs; |
2200 | |
2201 | /* For Arm */ |
2202 | regs->ctrl_sts_reg = base + 0x20104; |
2203 | regs->rstoutn_mask_reg = base + 0x20108; |
2204 | regs->sys_soft_rst_reg = base + 0x2010C; |
2205 | regs->main_int_cause_reg = base + 0x20200; |
2206 | regs->enpointa_mask_reg = base + 0x2020C; |
2207 | regs->rstoutn_en_reg = base + 0xF1400; |
2208 | /* For Doorbell */ |
2209 | regs->pciea_to_arm_drbl_reg = base + 0x20400; |
2210 | regs->arm_to_pciea_drbl_reg = base + 0x20408; |
2211 | regs->arm_to_pciea_mask_reg = base + 0x2040C; |
2212 | regs->pciea_to_arm_msg0 = base + 0x20430; |
2213 | regs->pciea_to_arm_msg1 = base + 0x20434; |
2214 | regs->arm_to_pciea_msg0 = base + 0x20438; |
2215 | regs->arm_to_pciea_msg1 = base + 0x2043C; |
2216 | |
2217 | /* For Message Unit */ |
2218 | |
2219 | regs->inb_aval_count_basel = base + 0x508; |
2220 | regs->inb_aval_count_baseh = base + 0x50C; |
2221 | regs->inb_write_pointer = base + 0x518; |
2222 | regs->inb_read_pointer = base + 0x51C; |
2224 | regs->outb_copy_basel = base + 0x5B0; |
2225 | regs->outb_copy_baseh = base + 0x5B4; |
2226 | regs->outb_copy_pointer = base + 0x544; |
2227 | regs->outb_read_pointer = base + 0x548; |
2228 | regs->outb_isr_cause = base + 0x560; |
2229 | regs->outb_coal_cfg = base + 0x568; |
2230 | /* Bit setting for HW */ |
2231 | regs->int_comaout = 1 << 8; |
2232 | regs->int_comaerr = 1 << 6; |
2233 | regs->int_dl_cpu2pciea = 1 << 1; |
2234 | regs->cl_pointer_toggle = 1 << 12; |
2235 | regs->clic_irq = 1 << 1; |
2236 | regs->clic_in_err = 1 << 8; |
2237 | regs->clic_out_err = 1 << 12; |
2238 | regs->cl_slot_num_mask = 0xFFF; |
2239 | regs->int_drbl_int_mask = 0x3FFFFFFF; |
2240 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | |
2241 | regs->int_comaerr; |
2242 | break; |
2243 | case PCI_DEVICE_ID_MARVELL_MV9580: |
2244 | mhba->mmio = mhba->base_addr[2]; |
2245 | base = mhba->mmio; |
2246 | if (!mhba->regs) { |
mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2248 | if (mhba->regs == NULL) |
2249 | return -ENOMEM; |
2250 | } |
2251 | regs = mhba->regs; |
2252 | /* For Arm */ |
2253 | regs->ctrl_sts_reg = base + 0x20104; |
2254 | regs->rstoutn_mask_reg = base + 0x1010C; |
2255 | regs->sys_soft_rst_reg = base + 0x10108; |
2256 | regs->main_int_cause_reg = base + 0x10200; |
2257 | regs->enpointa_mask_reg = base + 0x1020C; |
2258 | regs->rstoutn_en_reg = base + 0xF1400; |
2259 | |
2260 | /* For Doorbell */ |
2261 | regs->pciea_to_arm_drbl_reg = base + 0x10460; |
2262 | regs->arm_to_pciea_drbl_reg = base + 0x10480; |
2263 | regs->arm_to_pciea_mask_reg = base + 0x10484; |
2264 | regs->pciea_to_arm_msg0 = base + 0x10400; |
2265 | regs->pciea_to_arm_msg1 = base + 0x10404; |
2266 | regs->arm_to_pciea_msg0 = base + 0x10420; |
2267 | regs->arm_to_pciea_msg1 = base + 0x10424; |
2268 | |
2269 | /* For reset*/ |
2270 | regs->reset_request = base + 0x10108; |
2271 | regs->reset_enable = base + 0x1010c; |
2272 | |
2273 | /* For Message Unit */ |
2274 | regs->inb_aval_count_basel = base + 0x4008; |
2275 | regs->inb_aval_count_baseh = base + 0x400C; |
2276 | regs->inb_write_pointer = base + 0x4018; |
2277 | regs->inb_read_pointer = base + 0x401C; |
2278 | regs->outb_copy_basel = base + 0x4058; |
2279 | regs->outb_copy_baseh = base + 0x405C; |
2280 | regs->outb_copy_pointer = base + 0x406C; |
2281 | regs->outb_read_pointer = base + 0x4070; |
2282 | regs->outb_coal_cfg = base + 0x4080; |
2283 | regs->outb_isr_cause = base + 0x4088; |
2284 | /* Bit setting for HW */ |
2285 | regs->int_comaout = 1 << 4; |
2286 | regs->int_dl_cpu2pciea = 1 << 12; |
2287 | regs->int_comaerr = 1 << 29; |
2288 | regs->cl_pointer_toggle = 1 << 14; |
2289 | regs->cl_slot_num_mask = 0x3FFF; |
2290 | regs->clic_irq = 1 << 0; |
2291 | regs->clic_out_err = 1 << 1; |
2292 | regs->int_drbl_int_mask = 0x3FFFFFFF; |
2293 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; |
2294 | break; |
2295 | default: |
2296 | return -1; |
2297 | } |
2298 | |
2299 | return 0; |
2300 | } |
2301 | |
2302 | /** |
2303 | * mvumi_init_fw - Initializes the FW |
2304 | * @mhba: Adapter soft state |
2305 | * |
2306 | * This is the main function for initializing firmware. |
2307 | */ |
2308 | static int mvumi_init_fw(struct mvumi_hba *mhba) |
2309 | { |
2310 | int ret = 0; |
2311 | |
2312 | if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { |
2313 | dev_err(&mhba->pdev->dev, "IO memory region busy!\n" ); |
2314 | return -EBUSY; |
2315 | } |
ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2317 | if (ret) |
2318 | goto fail_ioremap; |
2319 | |
2320 | switch (mhba->pdev->device) { |
2321 | case PCI_DEVICE_ID_MARVELL_MV9143: |
2322 | mhba->instancet = &mvumi_instance_9143; |
2323 | mhba->io_seq = 0; |
2324 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; |
2325 | mhba->request_id_enabled = 1; |
2326 | break; |
2327 | case PCI_DEVICE_ID_MARVELL_MV9580: |
2328 | mhba->instancet = &mvumi_instance_9580; |
2329 | mhba->io_seq = 0; |
2330 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; |
2331 | break; |
2332 | default: |
2333 | dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n" , |
2334 | mhba->pdev->device); |
2335 | mhba->instancet = NULL; |
2336 | ret = -EINVAL; |
2337 | goto fail_alloc_mem; |
2338 | } |
2339 | dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n" , |
2340 | mhba->pdev->device); |
2341 | ret = mvumi_cfg_hw_reg(mhba); |
2342 | if (ret) { |
2343 | dev_err(&mhba->pdev->dev, |
2344 | "failed to allocate memory for reg\n" ); |
2345 | ret = -ENOMEM; |
2346 | goto fail_alloc_mem; |
2347 | } |
mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2350 | if (!mhba->handshake_page) { |
2351 | dev_err(&mhba->pdev->dev, |
2352 | "failed to allocate memory for handshake\n" ); |
2353 | ret = -ENOMEM; |
2354 | goto fail_alloc_page; |
2355 | } |
2356 | |
2357 | if (mvumi_start(mhba)) { |
2358 | ret = -EINVAL; |
2359 | goto fail_ready_state; |
2360 | } |
2361 | ret = mvumi_alloc_cmds(mhba); |
2362 | if (ret) |
2363 | goto fail_ready_state; |
2364 | |
2365 | return 0; |
2366 | |
2367 | fail_ready_state: |
2368 | mvumi_release_mem_resource(mhba); |
dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
kfree(mhba->regs);
fail_alloc_mem:
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2375 | fail_ioremap: |
2376 | pci_release_regions(mhba->pdev); |
2377 | |
2378 | return ret; |
2379 | } |
2380 | |
2381 | /** |
2382 | * mvumi_io_attach - Attaches this driver to SCSI mid-layer |
2383 | * @mhba: Adapter soft state |
2384 | */ |
2385 | static int mvumi_io_attach(struct mvumi_hba *mhba) |
2386 | { |
2387 | struct Scsi_Host *host = mhba->shost; |
2388 | struct scsi_device *sdev = NULL; |
2389 | int ret; |
2390 | unsigned int max_sg = (mhba->ib_max_size - |
2391 | sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); |
2392 | |
2393 | host->irq = mhba->pdev->irq; |
2394 | host->unique_id = mhba->unique_id; |
2395 | host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
2396 | host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; |
2397 | host->max_sectors = mhba->max_transfer_size / 512; |
2398 | host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
2399 | host->max_id = mhba->max_target_id; |
2400 | host->max_cmd_len = MAX_COMMAND_SIZE; |
2401 | |
ret = scsi_add_host(host, &mhba->pdev->dev);
if (ret) {
dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2405 | return ret; |
2406 | } |
2407 | mhba->fw_flag |= MVUMI_FW_ATTACH; |
2408 | |
2409 | mutex_lock(&mhba->sas_discovery_mutex); |
2410 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) |
ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2412 | else |
2413 | ret = 0; |
2414 | if (ret) { |
2415 | dev_err(&mhba->pdev->dev, "add virtual device failed\n" ); |
2416 | mutex_unlock(lock: &mhba->sas_discovery_mutex); |
2417 | goto fail_add_device; |
2418 | } |
2419 | |
mhba->dm_thread = kthread_create(mvumi_rescan_bus,
mhba, "mvumi_scanthread");
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
ret = PTR_ERR(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
atomic_set(&mhba->pnp_count, 1);
wake_up_process(mhba->dm_thread);

mutex_unlock(&mhba->sas_discovery_mutex);
2433 | return 0; |
2434 | |
2435 | fail_create_thread: |
2436 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) |
2437 | sdev = scsi_device_lookup(mhba->shost, 0, |
2438 | mhba->max_target_id - 1, 0); |
2439 | if (sdev) { |
2440 | scsi_remove_device(sdev); |
2441 | scsi_device_put(sdev); |
2442 | } |
2443 | fail_add_device: |
2444 | scsi_remove_host(mhba->shost); |
2445 | return ret; |
2446 | } |
2447 | |
2448 | /** |
2449 | * mvumi_probe_one - PCI hotplug entry point |
2450 | * @pdev: PCI device structure |
2451 | * @id: PCI ids of supported hotplugged adapter |
2452 | */ |
2453 | static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
2454 | { |
2455 | struct Scsi_Host *host; |
2456 | struct mvumi_hba *mhba; |
2457 | int ret; |
2458 | |
2459 | dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: " , |
2460 | pdev->vendor, pdev->device, pdev->subsystem_vendor, |
2461 | pdev->subsystem_device); |
2462 | |
ret = pci_enable_device(pdev);
2464 | if (ret) |
2465 | return ret; |
2466 | |
2467 | ret = mvumi_pci_set_master(pdev); |
2468 | if (ret) |
2469 | goto fail_set_dma_mask; |
2470 | |
2471 | host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); |
2472 | if (!host) { |
2473 | dev_err(&pdev->dev, "scsi_host_alloc failed\n" ); |
2474 | ret = -ENOMEM; |
2475 | goto fail_alloc_instance; |
2476 | } |
mhba = shost_priv(host);

INIT_LIST_HEAD(&mhba->cmd_pool);
INIT_LIST_HEAD(&mhba->ob_data_list);
INIT_LIST_HEAD(&mhba->free_ob_list);
INIT_LIST_HEAD(&mhba->res_list);
INIT_LIST_HEAD(&mhba->waiting_req_list);
mutex_init(&mhba->device_lock);
INIT_LIST_HEAD(&mhba->mhba_dev_list);
INIT_LIST_HEAD(&mhba->shost_dev_list);
atomic_set(&mhba->fw_outstanding, 0);
init_waitqueue_head(&mhba->int_cmd_wait_q);
mutex_init(&mhba->sas_discovery_mutex);

mhba->pdev = pdev;
mhba->shost = host;
mhba->unique_id = pci_dev_id(pdev);
2494 | |
2495 | ret = mvumi_init_fw(mhba); |
2496 | if (ret) |
2497 | goto fail_init_fw; |
2498 | |
ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
"mvumi", mhba);
2501 | if (ret) { |
2502 | dev_err(&pdev->dev, "failed to register IRQ\n" ); |
2503 | goto fail_init_irq; |
2504 | } |
2505 | |
2506 | mhba->instancet->enable_intr(mhba); |
pci_set_drvdata(pdev, mhba);
2508 | |
2509 | ret = mvumi_io_attach(mhba); |
2510 | if (ret) |
2511 | goto fail_io_attach; |
2512 | |
2513 | mvumi_backup_bar_addr(mhba); |
2514 | dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n" ); |
2515 | |
2516 | return 0; |
2517 | |
2518 | fail_io_attach: |
2519 | mhba->instancet->disable_intr(mhba); |
2520 | free_irq(mhba->pdev->irq, mhba); |
2521 | fail_init_irq: |
2522 | mvumi_release_fw(mhba); |
2523 | fail_init_fw: |
scsi_host_put(host);
2525 | |
2526 | fail_alloc_instance: |
2527 | fail_set_dma_mask: |
pci_disable_device(pdev);
2529 | |
2530 | return ret; |
2531 | } |
2532 | |
2533 | static void mvumi_detach_one(struct pci_dev *pdev) |
2534 | { |
2535 | struct Scsi_Host *host; |
2536 | struct mvumi_hba *mhba; |
2537 | |
2538 | mhba = pci_get_drvdata(pdev); |
2539 | if (mhba->dm_thread) { |
kthread_stop(mhba->dm_thread);
2541 | mhba->dm_thread = NULL; |
2542 | } |
2543 | |
2544 | mvumi_detach_devices(mhba); |
2545 | host = mhba->shost; |
2546 | scsi_remove_host(mhba->shost); |
2547 | mvumi_flush_cache(mhba); |
2548 | |
2549 | mhba->instancet->disable_intr(mhba); |
2550 | free_irq(mhba->pdev->irq, mhba); |
2551 | mvumi_release_fw(mhba); |
scsi_host_put(host);
pci_disable_device(pdev);
dev_dbg(&pdev->dev, "driver is removed!\n");
2555 | } |
2556 | |
2557 | /** |
2558 | * mvumi_shutdown - Shutdown entry point |
2559 | * @pdev: PCI device structure |
2560 | */ |
2561 | static void mvumi_shutdown(struct pci_dev *pdev) |
2562 | { |
2563 | struct mvumi_hba *mhba = pci_get_drvdata(pdev); |
2564 | |
2565 | mvumi_flush_cache(mhba); |
2566 | } |
2567 | |
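/*
* PM callbacks: suspend flushes the controller cache, masks interrupts
* and unmaps the BARs; resume remaps the BARs, reprograms the register
* layout and restarts the firmware via mvumi_start().
*/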
2568 | static int __maybe_unused mvumi_suspend(struct device *dev) |
2569 | { |
2570 | struct pci_dev *pdev = to_pci_dev(dev); |
2571 | struct mvumi_hba *mhba = pci_get_drvdata(pdev); |
2572 | |
2573 | mvumi_flush_cache(mhba); |
2574 | |
2575 | mhba->instancet->disable_intr(mhba); |
mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2577 | |
2578 | return 0; |
2579 | } |
2580 | |
2581 | static int __maybe_unused mvumi_resume(struct device *dev) |
2582 | { |
2583 | int ret; |
2584 | struct pci_dev *pdev = to_pci_dev(dev); |
2585 | struct mvumi_hba *mhba = pci_get_drvdata(pdev); |
2586 | |
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2588 | if (ret) |
2589 | goto fail; |
ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2591 | if (ret) |
2592 | goto release_regions; |
2593 | |
2594 | if (mvumi_cfg_hw_reg(mhba)) { |
2595 | ret = -EINVAL; |
2596 | goto unmap_pci_addr; |
2597 | } |
2598 | |
2599 | mhba->mmio = mhba->base_addr[0]; |
2600 | mvumi_reset(mhba); |
2601 | |
2602 | if (mvumi_start(mhba)) { |
2603 | ret = -EINVAL; |
2604 | goto unmap_pci_addr; |
2605 | } |
2606 | |
2607 | mhba->instancet->enable_intr(mhba); |
2608 | |
2609 | return 0; |
2610 | |
2611 | unmap_pci_addr: |
mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2613 | release_regions: |
2614 | pci_release_regions(pdev); |
2615 | fail: |
2616 | |
2617 | return ret; |
2618 | } |
2619 | |
2620 | static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume); |
2621 | |
2622 | static struct pci_driver mvumi_pci_driver = { |
2623 | |
2624 | .name = MV_DRIVER_NAME, |
2625 | .id_table = mvumi_pci_table, |
2626 | .probe = mvumi_probe_one, |
2627 | .remove = mvumi_detach_one, |
2628 | .shutdown = mvumi_shutdown, |
2629 | .driver.pm = &mvumi_pm_ops, |
2630 | }; |
2631 | |
2632 | module_pci_driver(mvumi_pci_driver); |
2633 | |