1
2
3
4
5
6
7#include "qla_def.h"
8#include "qla_tmpl.h"
9
/* Shorthand for the mapped ISP24xx register block of an HBA. */
#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
/* Byte offset of the iobase_addr (window-select) register in that block. */
#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha) IOBAR(ISPREG(vha))
/* Sentinel returned by entry handlers to abort the whole dump capture. */
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
14
15static inline void
16qla27xx_insert16(uint16_t value, void *buf, ulong *len)
17{
18 if (buf) {
19 buf += *len;
20 *(__le16 *)buf = cpu_to_le16(value);
21 }
22 *len += sizeof(value);
23}
24
25static inline void
26qla27xx_insert32(uint32_t value, void *buf, ulong *len)
27{
28 if (buf) {
29 buf += *len;
30 *(__le32 *)buf = cpu_to_le32(value);
31 }
32 *len += sizeof(value);
33}
34
35static inline void
36qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
37{
38 if (buf && mem && size) {
39 buf += *len;
40 memcpy(buf, mem, size);
41 }
42 *len += size;
43}
44
45static inline void
46qla27xx_read8(void __iomem *window, void *buf, ulong *len)
47{
48 uint8_t value = ~0;
49
50 if (buf) {
51 value = RD_REG_BYTE(window);
52 }
53 qla27xx_insert32(value, buf, len);
54}
55
56static inline void
57qla27xx_read16(void __iomem *window, void *buf, ulong *len)
58{
59 uint16_t value = ~0;
60
61 if (buf) {
62 value = RD_REG_WORD(window);
63 }
64 qla27xx_insert32(value, buf, len);
65}
66
67static inline void
68qla27xx_read32(void __iomem *window, void *buf, ulong *len)
69{
70 uint32_t value = ~0;
71
72 if (buf) {
73 value = RD_REG_DWORD(window);
74 }
75 qla27xx_insert32(value, buf, len);
76}
77
78static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
79{
80 return
81 (width == 1) ? qla27xx_read8 :
82 (width == 2) ? qla27xx_read16 :
83 qla27xx_read32;
84}
85
86static inline void
87qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
88 uint offset, void *buf, ulong *len)
89{
90 void __iomem *window = (void __iomem *)reg + offset;
91
92 qla27xx_read32(window, buf, len);
93}
94
95static inline void
96qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
97 uint offset, uint32_t data, void *buf)
98{
99 if (buf) {
100 void __iomem *window = (void __iomem *)reg + offset;
101
102 WRT_REG_DWORD(window, data);
103 }
104}
105
/*
 * Capture @count registers of @width bytes through a banked register window.
 * First selects the bank by writing @addr to the iobase_addr register, then
 * reads successive registers starting @offset bytes into the block, emitting
 * an (address, value) pair for each.  During the sizing pass (@buf == NULL)
 * no hardware is touched but *len still advances by the same amount.
 */
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	/* Select the register window before reading through it. */
	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}
122
123static inline void
124qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
125{
126 if (buf)
127 ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
128}
129
130static inline struct qla27xx_fwdt_entry *
131qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
132{
133 return (void *)ent + le32_to_cpu(ent->hdr.size);
134}
135
/* Entry type 0: no-op.  Nothing is captured; the entry is marked skipped. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}
146
/*
 * Entry type 255: end-of-template marker.  Returns NULL to tell the walker
 * that no further entries follow.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* NULL terminates qla27xx_walk_template()'s loop. */
	return NULL;
}
158
/*
 * Entry type 256: read a run of I/O registers through the banked window
 * ("read I/O type 1"), emitting address/value pairs.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}
174
/*
 * Entry type 257: write one value to a windowed I/O register
 * ("write I/O type 1"): select the window, then write the data register.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}
190
/*
 * Entry type 258: read I/O registers with an extra bank-select step
 * ("read I/O type 2"): program the bank register, then read the window.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}
209
/*
 * Entry type 259: write one value with bank selection ("write I/O type 2"):
 * select window, select bank, then write the data register.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}
228
/*
 * Entry type 260: capture a single PCI-mapped register; the value is
 * preceded in the dump by its offset.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}
242
/* Entry type 261: write one value to a PCI-mapped register. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}
256
/*
 * Entry type 262: dump a firmware RAM region.  Depending on the area code,
 * the start/end bounds come from the template or are overridden with the
 * values discovered at firmware load time (and written back into the
 * template during the capture pass).  Returns INVALID_ENTRY on a mailbox
 * read failure so the walker aborts the whole dump.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;
	int rc;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		/* Bounds are taken from the template as-is. */
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		/* End is the actual firmware memory size; patch the template. */
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	/* Reject empty or inverted ranges before computing the length. */
	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;	/* inclusive bounds, in dwords */
	if (buf) {
		buf += *len;
		rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
			    __func__, area, start, end);
			/* Abort the entire dump; the buffer is incomplete. */
			return INVALID_ENTRY;
		}
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}
325
/*
 * Entry type 263: capture the contents of request, response, or ATIO
 * queues.  Each captured queue is emitted as (id, length, ring data).
 * During the sizing pass (@buf == NULL) unallocated queues are counted at
 * their default depth so enough space is reserved.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		/* NOTE(review): num_queues is stored CPU-endian here while
		 * other template fields use cpu_to_le32 — confirm field type. */
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}
392
/*
 * Entry type 264: capture the Fibre Channel Event (FCE) trace buffer along
 * with the mailbox values that enabled it.  Skipped if FCE was never set up.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}
420
/*
 * Entry type 265: pause the RISC processor so volatile state can be read.
 * Hardware is only touched during the capture pass.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}
432
/*
 * Entry type 266: soft-reset the RISC processor (capture pass only).
 * A failed reset is logged via WARN_ON_ONCE but does not abort the dump.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);

	return qla27xx_next_entry(ent);
}
444
/* Entry type 267: disable interrupts by writing a register from the template. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}
458
/*
 * Entry type 268: capture a host buffer — extended trace, exchange-offload,
 * or extended-login memory.  Missing buffers cause the entry to be skipped;
 * the request/response mirror types are not implemented and always skipped.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
				    vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
				    vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/* Queue-mirror capture is not implemented; skip the entry. */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}
528
/*
 * Entry type 269: emit a fixed scratch pattern plus the running dump length
 * (including the length word itself) — useful for offset verification.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	/* Record the length as it will be after this word is appended. */
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}
545
546static struct qla27xx_fwdt_entry *
547qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
548 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
549{
550 ulong addr = le32_to_cpu(ent->t270.addr);
551 ulong dwords = le32_to_cpu(ent->t270.count);
552
553 ql_dbg(ql_dbg_misc, vha, 0xd20e,
554 "%s: rdremreg [%lx]\n", __func__, *len);
555 qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
556 while (dwords--) {
557 qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
558 qla27xx_insert32(addr, buf, len);
559 qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
560 addr += sizeof(uint32_t);
561 }
562
563 return qla27xx_next_entry(ent);
564}
565
/*
 * Entry type 271: write a remote register indirectly — select window 0x40,
 * stage the data at 0xc4, then write the target address to 0xc0.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}
581
/*
 * Entry type 272: dump remote (MPI) RAM.  The read happens only during the
 * capture pass; the length advances in both passes.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}
601
/*
 * Entry type 273: dump PCI config space as address/value pairs.  A failed
 * config read is logged and the all-ones placeholder is recorded instead.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}
624
/*
 * Entry type 274: capture queue shadow (in/out) pointers.  Each record is
 * (queue id, 1, pointer value).  During sizing, unallocated queues still
 * reserve space so both passes agree on the length.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}
686
/*
 * Entry type 275: copy a literal byte buffer embedded in the template into
 * the dump.  A declared length that overruns the entry is truncated to the
 * entry's actual size (and written back); a zero length skips the entry.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		/* Clamp to the bytes actually present in the entry. */
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}

	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}
715
/*
 * Entry type 276: conditional guard.  Compares the adapter's device type
 * nibble and port number against cond1/cond2; on mismatch the FOLLOWING
 * entry is marked skipped and the template's entry count is decremented
 * (capture pass only — @buf doubles as the template base here).
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		/* Device-type nibble from the PCI device id; port within 0-3. */
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}
740
/*
 * Entry type 277: read a PEP register — record the command word, issue it
 * to the command register, then capture the data register.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}
757
/*
 * Entry type 278: write a PEP register — stage the data first, then issue
 * the command word.  Nothing is inserted into the dump buffer.
 */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}
774
/* Fallback for unrecognized entry types: log, mark skipped, advance. */
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}
787
/*
 * Dispatch table mapping entry types to handlers.  MUST stay sorted by
 * type in ascending order: qla27xx_find_entry() scans with a "< type"
 * comparison and relies on the final catch-all ((uint)-1 == UINT_MAX)
 * to terminate the scan.
 */
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
	{ -1, qla27xx_fwdt_entry_other }
};
819
820static inline
821typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
822{
823 typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;
824
825 while (list->type < type)
826 list++;
827
828 if (list->type == type)
829 return list->call;
830 return qla27xx_fwdt_entry_other;
831}
832
/*
 * Walk all entries of a firmware-dump template, dispatching each to its
 * handler.  With @buf == NULL this is the sizing pass (only *len advances);
 * with a buffer it is the capture pass.  If a handler returns INVALID_ENTRY
 * the dump is unusable and *len is zeroed.
 */
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	/* count is kept CPU-endian during the walk, restored to LE at exit. */
	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;	/* end-of-template entry (t255) */

		if (ent == INVALID_ENTRY) {
			*len = 0;
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Unable to capture FW dump");
			goto bailout;
		}
	}

	/* Leftover count means the walk ended early (missing entries). */
	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	/* A non-NULL ent means count ran out before a t255 end marker. */
	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);

bailout:
	cpu_to_le32s(&tmp->count);
}
869
/* Record the capture time (in jiffies) into the template header. */
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}
875
/*
 * Pack the six dotted components of the driver version string into the
 * template's driver_info words (little-nibble-first packing).
 */
static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	/* Version string is expected as "a.b.c.d.e.f"; warn if it is not. */
	WARN_ON_ONCE(sscanf(qla2x00_version_str,
	    "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5) != 6);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	/* NOTE(review): 0x12345678 appears to be a fixed marker value —
	 * confirm against the dump-template specification. */
	tmp->driver_info[2] = 0x12345678;
}
889
/* Copy the running firmware's version and attribute words into the template. */
static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}
902
/* Stamp a template copy with capture time, driver and firmware versions. */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}
911
912static inline uint32_t
913qla27xx_template_checksum(void *p, ulong size)
914{
915 __le32 *buf = p;
916 uint64_t sum = 0;
917
918 size /= sizeof(*buf);
919
920 for ( ; size--; buf++)
921 sum += le32_to_cpu(*buf);
922
923 sum = (sum & 0xffffffff) + (sum >> 32);
924
925 return ~sum;
926}
927
/* A valid template checksums to zero over its declared size. */
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	/* NOTE(review): template_size is used without le32_to_cpu here,
	 * unlike other header fields — confirm the field's declared type. */
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}
933
/* Check the template header carries the expected firmware-dump type code. */
static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}
939
/*
 * Capture a firmware dump: copy the template to the head of @buf, stamp it
 * with version/time info, then walk it in capture mode.  Returns the total
 * dump length, or 0 if the template is invalid.
 */
static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		/* The dump begins with an edited copy of the template. */
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}
955
/*
 * Compute the buffer size a dump of template @p would need by walking it
 * in sizing mode (NULL buffer).  Returns 0 for an invalid template.
 */
ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}
969
/* Return the size, in bytes, declared in a template's header. */
ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}
977
/*
 * Validate a dump template: correct header type code and a self-consistent
 * checksum.  Logs the specific failure; returns true only if both pass.
 */
int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}
998
/*
 * Capture a firmware dump into the preallocated fw_dump buffer.  Takes the
 * hardware lock unless the caller already holds it.  When an MPI dump is
 * pending (fw_dump_mpi), two templates are executed back-to-back into the
 * same buffer.  On any template failure the whole dump is abandoned
 * (fw_dumped stays clear); the MPI-pending flag is always reset.
 */
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

/* Sparse cannot model the conditional locking; hide it from __CHECKER__. */
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		uint j;
		ulong len;
		void *buf = vha->hw->fw_dump;
		/* Second template is the MPI dump, run only when requested. */
		uint count = vha->hw->fw_dump_mpi ? 2 : 1;

		for (j = 0; j < count; j++, fwdt++, buf += len) {
			ql_log(ql_log_warn, vha, 0xd011,
			    "-> fwdt%u running...\n", j);
			if (!fwdt->template) {
				ql_log(ql_log_warn, vha, 0xd012,
				    "-> fwdt%u no template\n", j);
				break;
			}
			len = qla27xx_execute_fwdt_template(vha,
			    fwdt->template, buf);
			if (len == 0) {
				/* Invalid template or aborted capture. */
				goto bailout;
			} else if (len != fwdt->dump_size) {
				ql_log(ql_log_warn, vha, 0xd013,
				    "-> fwdt%u fwdump residual=%+ld\n",
				    j, fwdt->dump_size - len);
			}
		}
		vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
		vha->hw->fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

bailout:
	vha->hw->fw_dump_mpi = 0;
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}
1056