// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_tmpl.h"

#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
#define INVALID_ENTRY	((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)

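/*
 * Buffer insertion helpers: each appends a value or a block of memory at
 * offset *len in the dump buffer and always advances *len, so callers can
 * pass buf == NULL to compute the required dump size without writing
 * anything.
 */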
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

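/*
 * Register read helpers: read a register of the given width through the
 * mapped window and store it as a 32-bit dump word. When only sizing the
 * dump (buf == NULL) the hardware access is skipped and only *len advances.
 */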
static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = rd_reg_byte(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = rd_reg_word(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = rd_reg_dword(window);
	}
	qla27xx_insert32(value, buf, len);
}

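/* Map a register width in bytes (1, 2 or 4) to the matching read helper. */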
static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		wrt_reg_dword(window, data);
	}
}

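/*
 * Select an I/O window by writing @addr to the iobase_addr register, then
 * read @count registers of @width bytes at @offset, recording the window
 * address alongside each value in the dump.
 */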
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

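/*
 * Entry header helpers: mark a template entry as skipped (only when a real
 * dump buffer is being written) and advance to the next entry using the
 * size recorded in the entry header.
 */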
static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

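/*
 * Per-entry handlers. Each qla27xx_fwdt_entry_tNNN() processes one template
 * entry type: it captures the requested data (registers, RAM, queues, driver
 * buffers, ...) into the dump, updates *len, and returns a pointer to the
 * next entry, NULL to terminate the walk, or INVALID_ENTRY on a fatal
 * capture error.
 */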
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

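/*
 * T262: dump a firmware RAM area. For the external, shared and DDR areas
 * the start/end range is taken from live HBA state (and written back into
 * the captured entry); the data itself is read with qla24xx_dump_ram().
 */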
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;
	int rc;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
			    __func__, area, start, end);
			return INVALID_ENTRY;
		}
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

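/*
 * T263: capture request, response or ATIO queue contents. For each present
 * queue the queue id, entry count and ring contents are appended; if nothing
 * is captured the entry is marked as skipped.
 */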
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf) {
		if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0x5001,
			    "%s: unable to soft reset\n", __func__);
			return INVALID_ENTRY;
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver; shadow pointers are used instead.
		 * Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

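/*
 * T276: conditional entry. When the ISP device type (from the PCI device
 * id) or the port number does not match the entry's conditions, the
 * immediately following entry is marked as skipped and dropped from the
 * captured entry count.
 */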
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

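/*
 * Dispatch table mapping entry type numbers to handlers. The table is kept
 * in ascending type order and terminated by a catch-all entry, which lets
 * qla27xx_find_entry() use a simple linear scan.
 */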
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
	{ -1, qla27xx_fwdt_entry_other }
};

static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

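/*
 * Walk every entry of a dump template, dispatching each to its handler.
 * With buf == NULL only the required dump length is accumulated; with a
 * real buffer the capture is performed. The residual entry count is written
 * back into the template, and a handler returning INVALID_ENTRY aborts the
 * walk with *len forced to zero.
 */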
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;

		if (ent == INVALID_ENTRY) {
			*len = 0;
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Unable to capture FW dump");
			goto bailout;
		}
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);

bailout:
	cpu_to_le32s(&tmp->count); /* endianize residual count */
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = cpu_to_le32(jiffies);
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	WARN_ON_ONCE(sscanf(qla2x00_version_str,
	    "%hhu.%hhu.%hhu.%hhu",
	    v + 0, v + 1, v + 2, v + 3) != 4);

	tmp->driver_info[0] = cpu_to_le32(
	    v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
	tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
	tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
}

static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
	tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
	tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
	tmp->firmware_version[3] = cpu_to_le32(
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
	tmp->firmware_version[4] = cpu_to_le32(
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

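/*
 * Template checksum: sum the template as little-endian 32-bit words in a
 * 64-bit accumulator, fold the carries back once and return the one's
 * complement, so a correctly checksummed template yields zero.
 */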
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp,
	    le32_to_cpu(tmp->template_size)) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

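/*
 * Copy the template into the dump buffer, stamp it with driver and firmware
 * information and walk it to capture the dump. Returns the number of bytes
 * produced, or zero if the template is invalid or the capture fails.
 */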
static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = le32_to_cpu(tmp->template_size);
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = le32_to_cpu(tmp->template_size);
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return le32_to_cpu(tmp->template_size);
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

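/*
 * Capture an MPI (fwdt[1]) firmware dump, taking the hardware lock unless
 * the caller already holds it. If an MPI dump has already been captured,
 * the template is walked into the spare area beyond the dump buffer so the
 * existing dump is preserved.
 */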
void
qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	if (!vha->hw->mpi_fw_dump) {
		ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
	} else {
		struct fwdt *fwdt = &vha->hw->fwdt[1];
		ulong len;
		void *buf = vha->hw->mpi_fw_dump;
		bool walk_template_only = false;

		if (vha->hw->mpi_fw_dumped) {
			/* Use the spare area for any further dumps. */
			buf += fwdt->dump_size;
			walk_template_only = true;
			ql_log(ql_log_warn, vha, 0x02f4,
			    "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
			    buf);
		}

		ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x02f6,
			    "-> fwdt1 no template\n");
			goto bailout;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			goto bailout;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0x02f7,
			    "-> fwdt1 fwdump residual=%+ld\n",
			    fwdt->dump_size - len);
		}
		vha->hw->stat.num_mpi_reset++;
		if (walk_template_only)
			goto bailout;

		vha->hw->mpi_fw_dump_len = len;
		vha->hw->mpi_fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0x02f8,
		    "-> MPI firmware dump saved to buffer (%lu/%p)\n",
		    vha->host_no, vha->hw->mpi_fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

bailout:
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

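/*
 * Capture the ISP (fwdt[0]) firmware dump into the preallocated fw_dump
 * buffer. The caller must hold the hardware lock; the request is ignored
 * if a dump has already been captured.
 */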
void
qla27xx_fwdump(scsi_qla_host_t *vha)
{
	lockdep_assert_held(&vha->hw->hardware_lock);

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		ulong len;
		void *buf = vha->hw->fw_dump;

		ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0xd012,
			    "-> fwdt0 no template\n");
			return;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			return;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0xd013,
			    "-> fwdt0 fwdump residual=%+ld\n",
			    fwdt->dump_size - len);
		}

		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = true;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}