// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"

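/**
 * qla4xxx_read_flash - vendor command to read adapter flash memory
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of flash, starting at the offset
 * passed in vendor_cmd[1], through a DMA-coherent bounce buffer and copies
 * the data into the job's reply payload.
 **/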
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

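/**
 * qla4xxx_update_flash - vendor command to write adapter flash memory
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a DMA-coherent buffer and writes it to
 * flash at the offset in vendor_cmd[1], with write options taken from
 * vendor_cmd[2].
 **/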
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

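/**
 * qla4xxx_get_acb_state - vendor command to query an IP address state
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Retrieves the IP state for the ACB index in vendor_cmd[1] and the IP
 * index in vendor_cmd[2]; the raw mailbox status registers are returned
 * in the reply payload. Not supported on 4010 adapters.
 **/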
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

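/**
 * qla4xxx_read_nvram - vendor command to read adapter NVRAM
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of NVRAM from the offset in
 * vendor_cmd[1]. Only 40xx adapters are supported, and offset + length
 * must not exceed the adapter's NVRAM size.
 **/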
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

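/**
 * qla4xxx_update_nvram - vendor command to write adapter NVRAM
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Writes the request payload to NVRAM at the offset in vendor_cmd[1],
 * subject to the same adapter and size checks as qla4xxx_read_nvram().
 **/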
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

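/**
 * qla4xxx_restore_defaults - vendor command to restore factory defaults
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Restores factory defaults for the region given in vendor_cmd[1];
 * vendor_cmd[2] and vendor_cmd[3] supply the region-specific field
 * parameters. Not supported on 4010 adapters.
 **/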
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults "
			   "failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

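/**
 * qla4xxx_bsg_get_acb - vendor command to read an address control block
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Fetches the ACB of the type given in vendor_cmd[1] into the reply
 * payload, which must be at least sizeof(struct addr_ctrl_blk) bytes.
 * Not supported on 4010 adapters.
 **/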
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}

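/**
 * ql4xxx_execute_diag_cmd - execute a diagnostic mailbox command
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues the mailbox command supplied in vendor_cmd[1..] and appends the
 * resulting mailbox status registers to the bsg reply so the application
 * can inspect them.
 **/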
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

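/**
 * qla4_83xx_wait_for_loopback_config_comp - wait for port config to settle
 * @ha: pointer to host adapter structure
 * @wait_for_link: also wait for a LINK UP notification when non-zero
 *
 * Waits up to IDC_COMP_TOV seconds (optionally extended by
 * ha->idc_extend_tmo) for the IDC Complete notification and, if requested,
 * for LINK UP. Returns QLA_SUCCESS, or QLA_ERROR on timeout.
 **/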
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
						(ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}

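/**
 * qla4_83xx_pre_loopback_config - enable loopback mode on the port
 * @ha: pointer to host adapter structure
 * @mbox_cmd: mailbox command array; mbox_cmd[1] selects the loopback type
 *
 * Sets the internal or external loopback bit in the port configuration
 * and disables DCBX before the loopback test is run.
 **/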
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}
exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

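/**
 * qla4_83xx_post_loopback_config - restore port config after loopback
 * @ha: pointer to host adapter structure
 * @mbox_cmd: mailbox command array; mbox_cmd[1] selects the loopback type
 *
 * Clears the loopback bit set by qla4_83xx_pre_loopback_config() and
 * re-enables DCBX. Schedules an adapter reset if the port configuration
 * cannot be restored.
 **/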
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}

exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

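/**
 * qla4xxx_execute_diag_loopback_cmd - run a network loopback diagnostic
 * @bsg_job: iscsi_bsg_job to handle
 *
 * On 8032/8042 adapters the port is switched into loopback mode first and
 * restored afterwards; the diagnostic mailbox command itself is executed
 * in between, and its status registers are returned in the bsg reply.
 **/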
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* After restoring the port config, wait for LINK UP only
		 * if the PHY link is up; otherwise no LINK UP notification
		 * will arrive. */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}
exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

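/**
 * qla4xxx_execute_diag_test - dispatch a QLISCSI_VND_DIAG_TEST request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Routes RAM/flash/NVRAM self-tests and LED config commands to
 * ql4xxx_execute_diag_cmd(), and loopback tests to
 * qla4xxx_execute_diag_loopback_cmd().
 **/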
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
		case QL_DIAG_CMD_TEST_DDR_SIZE:
		case QL_DIAG_CMD_TEST_DDR_RW:
		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
		case QL_DIAG_CMD_TEST_NVRAM:
		case QL_DIAG_CMD_TEST_FLASH_ROM:
		case QL_DIAG_CMD_TEST_DMA_XFER:
		case QL_DIAG_CMD_SELF_DDR_RW:
		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
			/* Execute diag test for adapter RAM/FLASH */
			ql4xxx_execute_diag_cmd(bsg_job);
			/* Always return success as we want to send
			 * bsg_reply to the application */
			rval = QLA_SUCCESS;
			break;

		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
			/* Execute diag test for Network */
			qla4xxx_execute_diag_loopback_cmd(bsg_job);
			/* Always return success as we want to send
			 * bsg_reply to the application */
			rval = QLA_SUCCESS;
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
				   __func__,
				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
		}
	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
	}

	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}

	return -ENOSYS;
}