/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * This program is free software, distributed under the terms of the
 * GNU General Public License (GPL).
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

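/*
 *   Global Data
 */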
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

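/* This table describes the differences between DMA controller chips */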
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

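/*  A constant array of IOASCs/URCs/Error Messages */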
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

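/*
 *  Function Prototypes
 */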
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
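/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/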
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

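/**
 * ipr_lock_and_done - Acquire the host lock and call the command's done function
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/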
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

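/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/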
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

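/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/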
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

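/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct, or NULL if the free queue is empty
 **/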
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
				     struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

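/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/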
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

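/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask.
 *
 * Return value:
 * 	none
 **/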
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

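/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/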
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

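/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/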
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

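/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/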
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

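/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted. It grabs the hrrq lock
 * and calls __ipr_sata_eh_done.
 *
 * Return value:
 * 	none
 **/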
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

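/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/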
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

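/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 * It grabs the hrrq lock and calls __ipr_scsi_eh_done.
 *
 * Return value:
 * 	none
 **/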
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

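/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/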
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

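/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/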
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

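/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/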
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

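/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/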
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

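/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/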
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

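/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/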
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

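/**
 * ipr_get_hrrq_index - Pick the next HRR queue index, round robin
 * @ioa_cfg:	ioa config struct
 *
 * If multiple HRR queues exist, queue 0 is reserved and the remaining
 * queues are selected round robin.
 *
 * Return value:
 *	index into the ioa_cfg->hrrq array
 **/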
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

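/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/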
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

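/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/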
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

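/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/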
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

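/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/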
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

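/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/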
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

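/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/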
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}

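/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/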
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

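/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/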
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

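/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/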
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

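/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async. It will log the
 * notification if appropriate and handle the config change.
 *
 * Return value:
 * 	none
 **/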
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

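/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/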
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

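/**
 * ipr_log_vpd_compact - Log the passed VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/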
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

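/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/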
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

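/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/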
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

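/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/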
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

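/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/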
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

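/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/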
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

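/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/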
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

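/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/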
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

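/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/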
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

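/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/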
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

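/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/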
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

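/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/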
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

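/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/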
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

2006static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2007 struct ipr_hostrcb *hostrcb)
2008{
2009 struct ipr_hostrcb_type_07_error *error;
2010
2011 error = &hostrcb->hcam.u.error.u.type_07_error;
2012 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2013 strim(error->failure_reason);
2014
2015 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2016 be32_to_cpu(hostrcb->hcam.u.error.prc));
2017 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2018 ipr_log_hex_data(ioa_cfg, error->data,
2019 be32_to_cpu(hostrcb->hcam.length) -
2020 (offsetof(struct ipr_hostrcb_error, u) +
2021 offsetof(struct ipr_hostrcb_type_07_error, data)));
2022}
2023
2024static const struct {
2025 u8 active;
2026 char *desc;
2027} path_active_desc[] = {
2028 { IPR_PATH_NO_INFO, "Path" },
2029 { IPR_PATH_ACTIVE, "Active path" },
2030 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2031};
2032
2033static const struct {
2034 u8 state;
2035 char *desc;
2036} path_state_desc[] = {
2037 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2038 { IPR_PATH_HEALTHY, "is healthy" },
2039 { IPR_PATH_DEGRADED, "is degraded" },
2040 { IPR_PATH_FAILED, "is failed" }
2041};
2042
2043
2044
2045
2046
2047
2048
2049
2050
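/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/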
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 fabric->res_path,
							 buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

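/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/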
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 cfg->res_path, buffer, sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]),
				     be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

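/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/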
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
				    buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
					    buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
					    array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
						 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_41_error *error;

	error = &hostrcb->hcam.u.error64.u.type_41_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_41_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}

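/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/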
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

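/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/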
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
			return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_41:
		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_get_free_hostrcb - Get a free hostrcb
 * @ioa:	ioa config struct
 *
 * Returns a free hostrcb, reclaiming the oldest buffer from the
 * report queue if the free queue is empty.
 *
 * Return value:
 *	pointer to a hostrcb
 **/
static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
	struct ipr_hostrcb *hostrcb;

	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					   struct ipr_hostrcb, queue);

	if (unlikely(!hostrcb)) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						   struct ipr_hostrcb, queue);
	}

	list_del_init(&hostrcb->queue);
	return hostrcb;
}

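/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/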
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
	schedule_work(&ioa_cfg->work_q);
	hostrcb = ipr_get_free_hostrcb(ioa_cfg);

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 *	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

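/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ack from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/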
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}

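/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/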
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP

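/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple.
 *
 * Return value:
 *	number of bytes copied
 **/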
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}

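/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 *	none
 **/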
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}

/**
 * ipr_add_remove_thread - Worker thread to add/remove devices
 * @work:	work struct
 *
 * Moves devices on and off the SCSI mid-layer as configuration
 * changes are reported by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_add_remove_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);
	}

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	schedule_work(&ioa_cfg->scsi_add_work_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif

/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};

/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};

/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};

/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

static int ipr_iopoll(struct irq_poll *iop, int budget);

/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};

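/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA capable buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/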
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

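/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/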
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	/* Copy the full elements first, then any remaining partial element */
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

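/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/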
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

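/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/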
4114static ssize_t ipr_store_update_fw(struct device *dev,
4115 struct device_attribute *attr,
4116 const char *buf, size_t count)
4117{
4118 struct Scsi_Host *shost = class_to_shost(dev);
4119 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4120 struct ipr_ucode_image_header *image_hdr;
4121 const struct firmware *fw_entry;
4122 struct ipr_sglist *sglist;
4123 char fname[100];
4124 char *src;
4125 char *endline;
4126 int result, dnld_size;
4127
4128 if (!capable(CAP_SYS_ADMIN))
4129 return -EACCES;
4130
4131 snprintf(fname, sizeof(fname), "%s", buf);
4132
4133 endline = strchr(fname, '\n');
4134 if (endline)
4135 *endline = '\0';
4136
4137 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4138 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4139 return -EIO;
4140 }
4141
4142 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4143
4144 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4145 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4146 sglist = ipr_alloc_ucode_buffer(dnld_size);
4147
4148 if (!sglist) {
4149 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4150 release_firmware(fw_entry);
4151 return -ENOMEM;
4152 }
4153
4154 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4155
4156 if (result) {
4157 dev_err(&ioa_cfg->pdev->dev,
4158 "Microcode buffer copy to DMA buffer failed\n");
4159 goto out;
4160 }
4161
4162 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4163
4164 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4165
4166 if (!result)
4167 result = count;
4168out:
4169 ipr_free_ucode_buffer(sglist);
4170 release_firmware(fw_entry);
4171 return result;
4172}
4173
4174static struct device_attribute ipr_update_fw_attr = {
4175 .attr = {
4176 .name = "update_fw",
4177 .mode = S_IWUSR,
4178 },
4179 .store = ipr_store_update_fw
4180};
4181
4182
4183
4184
4185
4186
4187
4188
4189
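/**
 * ipr_show_fw_type - Show the adapter's firmware type (SIS64 or not)
 * @dev:	class_device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/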
4190static ssize_t ipr_show_fw_type(struct device *dev,
4191 struct device_attribute *attr, char *buf)
4192{
4193 struct Scsi_Host *shost = class_to_shost(dev);
4194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4195 unsigned long lock_flags = 0;
4196 int len;
4197
4198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4199 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4201 return len;
4202}
4203
4204static struct device_attribute ipr_ioa_fw_type_attr = {
4205 .attr = {
4206 .name = "fw_type",
4207 .mode = S_IRUGO,
4208 },
4209 .show = ipr_show_fw_type
4210};
4211
4212static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4213 struct bin_attribute *bin_attr, char *buf,
4214 loff_t off, size_t count)
4215{
4216 struct device *cdev = container_of(kobj, struct device, kobj);
4217 struct Scsi_Host *shost = class_to_shost(cdev);
4218 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4219 struct ipr_hostrcb *hostrcb;
4220 unsigned long lock_flags = 0;
4221 int ret;
4222
4223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4224 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4225 struct ipr_hostrcb, queue);
4226 if (!hostrcb) {
4227 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4228 return 0;
4229 }
4230 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4231 sizeof(hostrcb->hcam));
4232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4233 return ret;
4234}
4235
4236static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4237 struct bin_attribute *bin_attr, char *buf,
4238 loff_t off, size_t count)
4239{
4240 struct device *cdev = container_of(kobj, struct device, kobj);
4241 struct Scsi_Host *shost = class_to_shost(cdev);
4242 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4243 struct ipr_hostrcb *hostrcb;
4244 unsigned long lock_flags = 0;
4245
4246 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4247 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4248 struct ipr_hostrcb, queue);
4249 if (!hostrcb) {
4250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4251 return count;
4252 }
4253
4254
4255 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4257 return count;
4258}
4259
4260static struct bin_attribute ipr_ioa_async_err_log = {
4261 .attr = {
4262 .name = "async_err_log",
4263 .mode = S_IRUGO | S_IWUSR,
4264 },
4265 .size = 0,
4266 .read = ipr_read_async_err_log,
4267 .write = ipr_next_async_err_log
4268};
4269
4270static struct device_attribute *ipr_ioa_attrs[] = {
4271 &ipr_fw_version_attr,
4272 &ipr_log_level_attr,
4273 &ipr_diagnostics_attr,
4274 &ipr_ioa_state_attr,
4275 &ipr_ioa_reset_attr,
4276 &ipr_update_fw_attr,
4277 &ipr_ioa_fw_type_attr,
4278 &ipr_iopoll_weight_attr,
4279 NULL,
4280};
4281
4282#ifdef CONFIG_SCSI_IPR_DUMP
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
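/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/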
4295static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4296 struct bin_attribute *bin_attr,
4297 char *buf, loff_t off, size_t count)
4298{
4299 struct device *cdev = container_of(kobj, struct device, kobj);
4300 struct Scsi_Host *shost = class_to_shost(cdev);
4301 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4302 struct ipr_dump *dump;
4303 unsigned long lock_flags = 0;
4304 char *src;
4305 int len, sdt_end;
4306 size_t rc = count;
4307
4308 if (!capable(CAP_SYS_ADMIN))
4309 return -EACCES;
4310
4311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4312 dump = ioa_cfg->dump;
4313
4314 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4316 return 0;
4317 }
4318 kref_get(&dump->kref);
4319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4320
4321 if (off > dump->driver_dump.hdr.len) {
4322 kref_put(&dump->kref, ipr_release_dump);
4323 return 0;
4324 }
4325
4326 if (off + count > dump->driver_dump.hdr.len) {
4327 count = dump->driver_dump.hdr.len - off;
4328 rc = count;
4329 }
4330
4331 if (count && off < sizeof(dump->driver_dump)) {
4332 if (off + count > sizeof(dump->driver_dump))
4333 len = sizeof(dump->driver_dump) - off;
4334 else
4335 len = count;
4336 src = (u8 *)&dump->driver_dump + off;
4337 memcpy(buf, src, len);
4338 buf += len;
4339 off += len;
4340 count -= len;
4341 }
4342
4343 off -= sizeof(dump->driver_dump);
4344
4345 if (ioa_cfg->sis64)
4346 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4347 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4348 sizeof(struct ipr_sdt_entry));
4349 else
4350 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4351 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4352
4353 if (count && off < sdt_end) {
4354 if (off + count > sdt_end)
4355 len = sdt_end - off;
4356 else
4357 len = count;
4358 src = (u8 *)&dump->ioa_dump + off;
4359 memcpy(buf, src, len);
4360 buf += len;
4361 off += len;
4362 count -= len;
4363 }
4364
4365 off -= sdt_end;
4366
4367 while (count) {
4368 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4369 len = PAGE_ALIGN(off) - off;
4370 else
4371 len = count;
4372 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4373 src += off & ~PAGE_MASK;
4374 memcpy(buf, src, len);
4375 buf += len;
4376 off += len;
4377 count -= len;
4378 }
4379
4380 kref_put(&dump->kref, ipr_release_dump);
4381 return rc;
4382}
4383
4384
4385
4386
4387
4388
4389
4390
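/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/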
4391static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4392{
4393 struct ipr_dump *dump;
4394 __be32 **ioa_data;
4395 unsigned long lock_flags = 0;
4396
4397 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4398
4399 if (!dump) {
4400 ipr_err("Dump memory allocation failed\n");
4401 return -ENOMEM;
4402 }
4403
4404 if (ioa_cfg->sis64)
4405 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4406 else
4407 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4408
4409 if (!ioa_data) {
4410 ipr_err("Dump memory allocation failed\n");
4411 kfree(dump);
4412 return -ENOMEM;
4413 }
4414
4415 dump->ioa_dump.ioa_data = ioa_data;
4416
4417 kref_init(&dump->kref);
4418 dump->ioa_cfg = ioa_cfg;
4419
4420 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4421
4422 if (INACTIVE != ioa_cfg->sdt_state) {
4423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4424 vfree(dump->ioa_dump.ioa_data);
4425 kfree(dump);
4426 return 0;
4427 }
4428
4429 ioa_cfg->dump = dump;
4430 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4431 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4432 ioa_cfg->dump_taken = 1;
4433 schedule_work(&ioa_cfg->work_q);
4434 }
4435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436
4437 return 0;
4438}
4439
4440
4441
4442
4443
4444
4445
4446
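/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/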
4447static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4448{
4449 struct ipr_dump *dump;
4450 unsigned long lock_flags = 0;
4451
4452 ENTER;
4453
4454 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4455 dump = ioa_cfg->dump;
4456 if (!dump) {
4457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4458 return 0;
4459 }
4460
4461 ioa_cfg->dump = NULL;
4462 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4463
4464 kref_put(&dump->kref, ipr_release_dump);
4465
4466 LEAVE;
4467 return 0;
4468}
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
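/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/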
4482static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4483 struct bin_attribute *bin_attr,
4484 char *buf, loff_t off, size_t count)
4485{
4486 struct device *cdev = container_of(kobj, struct device, kobj);
4487 struct Scsi_Host *shost = class_to_shost(cdev);
4488 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4489 int rc;
4490
4491 if (!capable(CAP_SYS_ADMIN))
4492 return -EACCES;
4493
4494 if (buf[0] == '1')
4495 rc = ipr_alloc_dump(ioa_cfg);
4496 else if (buf[0] == '0')
4497 rc = ipr_free_dump(ioa_cfg);
4498 else
4499 return -EINVAL;
4500
4501 if (rc)
4502 return rc;
4503 else
4504 return count;
4505}
4506
4507static struct bin_attribute ipr_dump_attr = {
4508 .attr = {
4509 .name = "dump",
4510 .mode = S_IRUSR | S_IWUSR,
4511 },
4512 .size = 0,
4513 .read = ipr_read_dump,
4514 .write = ipr_write_dump
4515};
4516#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4518#endif
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
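/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/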
4529static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4530 int reason)
4531{
4532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4533 struct ipr_resource_entry *res;
4534 unsigned long lock_flags = 0;
4535
4536 if (reason != SCSI_QDEPTH_DEFAULT)
4537 return -EOPNOTSUPP;
4538
4539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4540 res = (struct ipr_resource_entry *)sdev->hostdata;
4541
4542 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4543 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4544 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4545
4546 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4547 return sdev->queue_depth;
4548}
4549
4550
4551
4552
4553
4554
4555
4556
4557
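/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/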
4558static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4559{
4560 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4561 struct ipr_resource_entry *res;
4562 unsigned long lock_flags = 0;
4563
4564 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4565 res = (struct ipr_resource_entry *)sdev->hostdata;
4566
4567 if (res) {
4568 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
4573 scsi_set_tag_type(sdev, tag_type);
4574
4575 if (tag_type)
4576 scsi_activate_tcq(sdev, sdev->queue_depth);
4577 else
4578 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4579 } else
4580 tag_type = 0;
4581 } else
4582 tag_type = 0;
4583
4584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4585 return tag_type;
4586}
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
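/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/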
4597static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4598{
4599 struct scsi_device *sdev = to_scsi_device(dev);
4600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4601 struct ipr_resource_entry *res;
4602 unsigned long lock_flags = 0;
4603 ssize_t len = -ENXIO;
4604
4605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4606 res = (struct ipr_resource_entry *)sdev->hostdata;
4607 if (res)
4608 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4609 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4610 return len;
4611}
4612
4613static struct device_attribute ipr_adapter_handle_attr = {
4614 .attr = {
4615 .name = "adapter_handle",
4616 .mode = S_IRUSR,
4617 },
4618 .show = ipr_show_adapter_handle
4619};
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
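/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/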
4631static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4632{
4633 struct scsi_device *sdev = to_scsi_device(dev);
4634 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4635 struct ipr_resource_entry *res;
4636 unsigned long lock_flags = 0;
4637 ssize_t len = -ENXIO;
4638 char buffer[IPR_MAX_RES_PATH_LENGTH];
4639
4640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4641 res = (struct ipr_resource_entry *)sdev->hostdata;
4642 if (res && ioa_cfg->sis64)
4643 len = snprintf(buf, PAGE_SIZE, "%s\n",
4644 __ipr_format_res_path(res->res_path, buffer,
4645 sizeof(buffer)));
4646 else if (res)
4647 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4648 res->bus, res->target, res->lun);
4649
4650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4651 return len;
4652}
4653
4654static struct device_attribute ipr_resource_path_attr = {
4655 .attr = {
4656 .name = "resource_path",
4657 .mode = S_IRUGO,
4658 },
4659 .show = ipr_show_resource_path
4660};
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
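/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/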
4671static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4672{
4673 struct scsi_device *sdev = to_scsi_device(dev);
4674 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4675 struct ipr_resource_entry *res;
4676 unsigned long lock_flags = 0;
4677 ssize_t len = -ENXIO;
4678
4679 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4680 res = (struct ipr_resource_entry *)sdev->hostdata;
4681 if (res && ioa_cfg->sis64)
4682 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4683 else if (res)
4684 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4685
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4687 return len;
4688}
4689
4690static struct device_attribute ipr_device_id_attr = {
4691 .attr = {
4692 .name = "device_id",
4693 .mode = S_IRUGO,
4694 },
4695 .show = ipr_show_device_id
4696};
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
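/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/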
4707static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4708{
4709 struct scsi_device *sdev = to_scsi_device(dev);
4710 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4711 struct ipr_resource_entry *res;
4712 unsigned long lock_flags = 0;
4713 ssize_t len = -ENXIO;
4714
4715 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4716 res = (struct ipr_resource_entry *)sdev->hostdata;
4717
4718 if (res)
4719 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4720
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722 return len;
4723}
4724
4725static struct device_attribute ipr_resource_type_attr = {
4726 .attr = {
4727 .name = "resource_type",
4728 .mode = S_IRUGO,
4729 },
4730 .show = ipr_show_resource_type
4731};
4732
4733
4734
4735
4736
4737
4738
4739
4740
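/**
 * ipr_show_raw_mode - Show the device's raw mode setting
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/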
4741static ssize_t ipr_show_raw_mode(struct device *dev,
4742 struct device_attribute *attr, char *buf)
4743{
4744 struct scsi_device *sdev = to_scsi_device(dev);
4745 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4746 struct ipr_resource_entry *res;
4747 unsigned long lock_flags = 0;
4748 ssize_t len;
4749
4750 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4751 res = (struct ipr_resource_entry *)sdev->hostdata;
4752 if (res)
4753 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4754 else
4755 len = -ENXIO;
4756 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4757 return len;
4758}
4759
4760
4761
4762
4763
4764
4765
4766
4767
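/**
 * ipr_store_raw_mode - Change the device's raw mode setting
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed on success / error code on failure
 **/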
4768static ssize_t ipr_store_raw_mode(struct device *dev,
4769 struct device_attribute *attr,
4770 const char *buf, size_t count)
4771{
4772 struct scsi_device *sdev = to_scsi_device(dev);
4773 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4774 struct ipr_resource_entry *res;
4775 unsigned long lock_flags = 0;
4776 ssize_t len;
4777
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779 res = (struct ipr_resource_entry *)sdev->hostdata;
4780 if (res) {
4781 if (ipr_is_af_dasd_device(res)) {
4782 res->raw_mode = simple_strtoul(buf, NULL, 10);
4783 len = strlen(buf);
4784 if (res->sdev)
4785 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4786 res->raw_mode ? "enabled" : "disabled");
4787 } else
4788 len = -EINVAL;
4789 } else
4790 len = -ENXIO;
4791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4792 return len;
4793}
4794
4795static struct device_attribute ipr_raw_mode_attr = {
4796 .attr = {
4797 .name = "raw_mode",
4798 .mode = S_IRUGO | S_IWUSR,
4799 },
4800 .show = ipr_show_raw_mode,
4801 .store = ipr_store_raw_mode
4802};
4803
4804static struct device_attribute *ipr_dev_attrs[] = {
4805 &ipr_adapter_handle_attr,
4806 &ipr_resource_path_attr,
4807 &ipr_device_id_attr,
4808 &ipr_resource_type_attr,
4809 &ipr_raw_mode_attr,
4810 NULL,
4811};
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
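/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/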
4827static int ipr_biosparam(struct scsi_device *sdev,
4828 struct block_device *block_device,
4829 sector_t capacity, int *parm)
4830{
4831 int heads, sectors;
4832 sector_t cylinders;
4833
4834 heads = 128;
4835 sectors = 32;
4836
4837 cylinders = capacity;
4838 sector_div(cylinders, (128 * 32));
4839
4840
4841 parm[0] = heads;
4842 parm[1] = sectors;
4843 parm[2] = cylinders;
4844
4845 return 0;
4846}
4847
4848
4849
4850
4851
4852
4853
4854
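/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/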
4855static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4856{
4857 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4858 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4859 struct ipr_resource_entry *res;
4860
4861 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4862 if ((res->bus == starget->channel) &&
4863 (res->target == starget->id)) {
4864 return res;
4865 }
4866 }
4867
4868 return NULL;
4869}
4870
4871static struct ata_port_info sata_port_info;
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
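/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/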
4883static int ipr_target_alloc(struct scsi_target *starget)
4884{
4885 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4886 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4887 struct ipr_sata_port *sata_port;
4888 struct ata_port *ap;
4889 struct ipr_resource_entry *res;
4890 unsigned long lock_flags;
4891
4892 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4893 res = ipr_find_starget(starget);
4894 starget->hostdata = NULL;
4895
4896 if (res && ipr_is_gata(res)) {
4897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4898 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4899 if (!sata_port)
4900 return -ENOMEM;
4901
4902 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4903 if (ap) {
4904 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4905 sata_port->ioa_cfg = ioa_cfg;
4906 sata_port->ap = ap;
4907 sata_port->res = res;
4908
4909 res->sata_port = sata_port;
4910 ap->private_data = sata_port;
4911 starget->hostdata = sata_port;
4912 } else {
4913 kfree(sata_port);
4914 return -ENOMEM;
4915 }
4916 }
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918
4919 return 0;
4920}
4921
4922
4923
4924
4925
4926
4927
4928
4929
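/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 **/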
4930static void ipr_target_destroy(struct scsi_target *starget)
4931{
4932 struct ipr_sata_port *sata_port = starget->hostdata;
4933 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4934 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4935
4936 if (ioa_cfg->sis64) {
4937 if (!ipr_find_starget(starget)) {
4938 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4939 clear_bit(starget->id, ioa_cfg->array_ids);
4940 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4941 clear_bit(starget->id, ioa_cfg->vset_ids);
4942 else if (starget->channel == 0)
4943 clear_bit(starget->id, ioa_cfg->target_ids);
4944 }
4945 }
4946
4947 if (sata_port) {
4948 starget->hostdata = NULL;
4949 ata_sas_port_destroy(sata_port->ap);
4950 kfree(sata_port);
4951 }
4952}
4953
4954
4955
4956
4957
4958
4959
4960
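/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/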
4961static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4962{
4963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4964 struct ipr_resource_entry *res;
4965
4966 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4967 if ((res->bus == sdev->channel) &&
4968 (res->target == sdev->id) &&
4969 (res->lun == sdev->lun))
4970 return res;
4971 }
4972
4973 return NULL;
4974}
4975
4976
4977
4978
4979
4980
4981
4982
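/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/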
4983static void ipr_slave_destroy(struct scsi_device *sdev)
4984{
4985 struct ipr_resource_entry *res;
4986 struct ipr_ioa_cfg *ioa_cfg;
4987 unsigned long lock_flags = 0;
4988
4989 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4990
4991 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4992 res = (struct ipr_resource_entry *) sdev->hostdata;
4993 if (res) {
4994 if (res->sata_port)
4995 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4996 sdev->hostdata = NULL;
4997 res->sdev = NULL;
4998 res->sata_port = NULL;
4999 }
5000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5001}
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
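/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/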
5012static int ipr_slave_configure(struct scsi_device *sdev)
5013{
5014 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5015 struct ipr_resource_entry *res;
5016 struct ata_port *ap = NULL;
5017 unsigned long lock_flags = 0;
5018 char buffer[IPR_MAX_RES_PATH_LENGTH];
5019
5020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5021 res = sdev->hostdata;
5022 if (res) {
5023 if (ipr_is_af_dasd_device(res))
5024 sdev->type = TYPE_RAID;
5025 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
5026 sdev->scsi_level = 4;
5027 sdev->no_uld_attach = 1;
5028 }
5029 if (ipr_is_vset_device(res)) {
5030 sdev->scsi_level = SCSI_SPC_3;
5031 sdev->no_report_opcodes = 1;
5032 blk_queue_rq_timeout(sdev->request_queue,
5033 IPR_VSET_RW_TIMEOUT);
5034 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
5035 }
5036 if (ipr_is_gata(res) && res->sata_port)
5037 ap = res->sata_port->ap;
5038 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5039
5040 if (ap) {
5041 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
5042 ata_sas_slave_configure(sdev, ap);
5043 } else
5044 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
5045 if (ioa_cfg->sis64)
5046 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
5047 ipr_format_res_path(ioa_cfg,
5048 res->res_path, buffer, sizeof(buffer)));
5049 return 0;
5050 }
5051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5052 return 0;
5053}
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
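/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success / -ENXIO on failure
 **/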
5065static int ipr_ata_slave_alloc(struct scsi_device *sdev)
5066{
5067 struct ipr_sata_port *sata_port = NULL;
5068 int rc = -ENXIO;
5069
5070 ENTER;
5071 if (sdev->sdev_target)
5072 sata_port = sdev->sdev_target->hostdata;
5073 if (sata_port) {
5074 rc = ata_sas_port_init(sata_port->ap);
5075 if (rc == 0)
5076 rc = ata_sas_sync_probe(sata_port->ap);
5077 }
5078
5079 if (rc)
5080 ipr_slave_destroy(sdev);
5081
5082 LEAVE;
5083 return rc;
5084}
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
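/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/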
5098static int ipr_slave_alloc(struct scsi_device *sdev)
5099{
5100 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5101 struct ipr_resource_entry *res;
5102 unsigned long lock_flags;
5103 int rc = -ENXIO;
5104
5105 sdev->hostdata = NULL;
5106
5107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5108
5109 res = ipr_find_sdev(sdev);
5110 if (res) {
5111 res->sdev = sdev;
5112 res->add_to_ml = 0;
5113 res->in_erp = 0;
5114 sdev->hostdata = res;
5115 if (!ipr_is_naca_model(res))
5116 res->needs_sync_complete = 1;
5117 rc = 0;
5118 if (ipr_is_gata(res)) {
5119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5120 return ipr_ata_slave_alloc(sdev);
5121 }
5122 }
5123
5124 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5125
5126 return rc;
5127}
5128
5129
5130
5131
5132
5133
5134
5135
5136
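/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/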
5137static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5138{
5139 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5140 return 1;
5141 return 0;
5142}
5143
5144
5145
5146
5147
5148
5149
5150
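/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/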
5151static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5152{
5153 struct ipr_cmnd *loop_cmd;
5154
5155 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5156 if (loop_cmd == ipr_cmd)
5157 return true;
5158 }
5159
5160 return false;
5161}
5162
5163
5164
5165
5166
5167
5168
5169
5170
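/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches the resource / 0 if it does not
 **/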
5171static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5172{
5173 struct ipr_resource_entry *res = resource;
5174
5175 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5176 return 1;
5177 return 0;
5178}
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
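/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/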
5189static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5190 int (*match)(struct ipr_cmnd *, void *))
5191{
5192 struct ipr_cmnd *ipr_cmd;
5193 int wait, i;
5194 unsigned long flags;
5195 struct ipr_hrr_queue *hrrq;
5196 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5197 DECLARE_COMPLETION_ONSTACK(comp);
5198
5199 ENTER;
5200 do {
5201 wait = 0;
5202
5203 for_each_hrrq(hrrq, ioa_cfg) {
5204 spin_lock_irqsave(hrrq->lock, flags);
5205 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5206 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5207 if (!ipr_cmnd_is_free(ipr_cmd)) {
5208 if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
5210 wait++;
5211 }
5212 }
5213 }
5214 spin_unlock_irqrestore(hrrq->lock, flags);
5215 }
5216
5217 if (wait) {
5218 timeout = wait_for_completion_timeout(&comp, timeout);
5219
5220 if (!timeout) {
5221 wait = 0;
5222
5223 for_each_hrrq(hrrq, ioa_cfg) {
5224 spin_lock_irqsave(hrrq->lock, flags);
5225 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5226 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5227 if (!ipr_cmnd_is_free(ipr_cmd)) {
5228 if (match(ipr_cmd, device)) {
5229 ipr_cmd->eh_comp = NULL;
5230 wait++;
5231 }
5232 }
5233 }
5234 spin_unlock_irqrestore(hrrq->lock, flags);
5235 }
5236
5237 if (wait)
5238 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5239 LEAVE;
5240 return wait ? FAILED : SUCCESS;
5241 }
5242 }
5243 } while (wait);
5244
5245 LEAVE;
5246 return SUCCESS;
5247}
5248
5249static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5250{
5251 struct ipr_ioa_cfg *ioa_cfg;
5252 unsigned long lock_flags = 0;
5253 int rc = SUCCESS;
5254
5255 ENTER;
5256 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5258
5259 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5260 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5261 dev_err(&ioa_cfg->pdev->dev,
5262 "Adapter being reset as a result of error recovery.\n");
5263
5264 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5265 ioa_cfg->sdt_state = GET_DUMP;
5266 }
5267
5268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5269 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5271
	/* If the adapter is still not operational after the reset/reload
	 * has completed, the host reset has failed.
	 */
5274 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5275 ipr_trace;
5276 rc = FAILED;
5277 }
5278
5279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5280 LEAVE;
5281 return rc;
5282}
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
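/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/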
5298static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5299 struct ipr_resource_entry *res)
5300{
5301 struct ipr_cmnd *ipr_cmd;
5302 struct ipr_ioarcb *ioarcb;
5303 struct ipr_cmd_pkt *cmd_pkt;
5304 struct ipr_ioarcb_ata_regs *regs;
5305 u32 ioasc;
5306
5307 ENTER;
5308 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5309 ioarcb = &ipr_cmd->ioarcb;
5310 cmd_pkt = &ioarcb->cmd_pkt;
5311
5312 if (ipr_cmd->ioa_cfg->sis64) {
5313 regs = &ipr_cmd->i.ata_ioadl.regs;
5314 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5315 } else
5316 regs = &ioarcb->u.add_data.u.regs;
5317
5318 ioarcb->res_handle = res->res_handle;
5319 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5320 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5321 if (ipr_is_gata(res)) {
5322 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5323 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5324 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5325 }
5326
5327 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5328 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5329 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5330 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5331 if (ipr_cmd->ioa_cfg->sis64)
5332 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5333 sizeof(struct ipr_ioasa_gata));
5334 else
5335 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5336 sizeof(struct ipr_ioasa_gata));
5337 }
5338
5339 LEAVE;
5340 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5341}
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
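/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/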
5353static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5354 unsigned long deadline)
5355{
5356 struct ipr_sata_port *sata_port = link->ap->private_data;
5357 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5358 struct ipr_resource_entry *res;
5359 unsigned long lock_flags = 0;
5360 int rc = -ENXIO, ret;
5361
5362 ENTER;
5363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5364 while (ioa_cfg->in_reset_reload) {
5365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5366 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5368 }
5369
5370 res = sata_port->res;
5371 if (res) {
5372 rc = ipr_device_reset(ioa_cfg, res);
5373 *classes = res->ata_class;
5374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5375
5376 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5377 if (ret != SUCCESS) {
5378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5379 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5381
5382 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5383 }
5384 } else
5385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5386
5387 LEAVE;
5388 return rc;
5389}
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
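/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/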
5402static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5403{
5404 struct ipr_cmnd *ipr_cmd;
5405 struct ipr_ioa_cfg *ioa_cfg;
5406 struct ipr_resource_entry *res;
5407 struct ata_port *ap;
5408 int rc = 0, i;
5409 struct ipr_hrr_queue *hrrq;
5410
5411 ENTER;
5412 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5413 res = scsi_cmd->device->hostdata;
5414
	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
5420 if (ioa_cfg->in_reset_reload)
5421 return FAILED;
5422 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5423 return FAILED;
5424
5425 for_each_hrrq(hrrq, ioa_cfg) {
5426 spin_lock(&hrrq->_lock);
5427 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5428 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5429
5430 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5431 if (!ipr_cmd->qc)
5432 continue;
5433 if (ipr_cmnd_is_free(ipr_cmd))
5434 continue;
5435
5436 ipr_cmd->done = ipr_sata_eh_done;
5437 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5438 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5439 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5440 }
5441 }
5442 }
5443 spin_unlock(&hrrq->_lock);
5444 }
5445 res->resetting_device = 1;
5446 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5447
5448 if (ipr_is_gata(res) && res->sata_port) {
5449 ap = res->sata_port->ap;
5450 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5451 ata_std_error_handler(ap);
5452 spin_lock_irq(scsi_cmd->device->host->host_lock);
5453 } else
5454 rc = ipr_device_reset(ioa_cfg, res);
5455 res->resetting_device = 0;
5456 res->reset_occurred = 1;
5457
5458 LEAVE;
5459 return rc ? FAILED : SUCCESS;
5460}
5461
5462static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5463{
5464 int rc;
5465 struct ipr_ioa_cfg *ioa_cfg;
5466 struct ipr_resource_entry *res;
5467
5468 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5469 res = cmd->device->hostdata;
5470
5471 if (!res)
5472 return FAILED;
5473
5474 spin_lock_irq(cmd->device->host->host_lock);
5475 rc = __ipr_eh_dev_reset(cmd);
5476 spin_unlock_irq(cmd->device->host->host_lock);
5477
5478 if (rc == SUCCESS) {
5479 if (ipr_is_gata(res) && res->sata_port)
5480 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5481 else
5482 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5483 }
5484
5485 return rc;
5486}
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
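/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/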
5497static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5498{
5499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5500 struct ipr_resource_entry *res;
5501
5502 ENTER;
5503 if (!ioa_cfg->sis64)
5504 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5505 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5506 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5507 break;
5508 }
5509 }
5510
	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
5515 if (ipr_cmd->sibling->sibling)
5516 ipr_cmd->sibling->sibling = NULL;
5517 else
5518 ipr_cmd->sibling->done(ipr_cmd->sibling);
5519
5520 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5521 LEAVE;
5522}
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
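/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/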
5535static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5536{
5537 struct ipr_cmnd *reset_cmd;
5538 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5539 struct ipr_cmd_pkt *cmd_pkt;
5540 unsigned long lock_flags = 0;
5541
5542 ENTER;
5543 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5544 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5546 return;
5547 }
5548
5549 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5550 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5551 ipr_cmd->sibling = reset_cmd;
5552 reset_cmd->sibling = ipr_cmd;
5553 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5554 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5555 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5556 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5557 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5558
5559 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5561 LEAVE;
5562}
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
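/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/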
5573static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5574{
5575 struct ipr_cmnd *ipr_cmd;
5576 struct ipr_ioa_cfg *ioa_cfg;
5577 struct ipr_resource_entry *res;
5578 struct ipr_cmd_pkt *cmd_pkt;
5579 u32 ioasc, int_reg;
5580 int i, op_found = 0;
5581 struct ipr_hrr_queue *hrrq;
5582
5583 ENTER;
5584 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5585 res = scsi_cmd->device->hostdata;
5586
	/*
	 * If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
5591 if (ioa_cfg->in_reset_reload ||
5592 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5593 return FAILED;
5594 if (!res)
5595 return FAILED;
5596
	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
5602 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5603
5604 if (!ipr_is_gscsi(res))
5605 return FAILED;
5606
5607 for_each_hrrq(hrrq, ioa_cfg) {
5608 spin_lock(&hrrq->_lock);
5609 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5610 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5611 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5612 op_found = 1;
5613 break;
5614 }
5615 }
5616 }
5617 spin_unlock(&hrrq->_lock);
5618 }
5619
5620 if (!op_found)
5621 return SUCCESS;
5622
5623 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5624 ipr_cmd->ioarcb.res_handle = res->res_handle;
5625 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5626 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5627 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5628 ipr_cmd->u.sdev = scsi_cmd->device;
5629
5630 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5631 scsi_cmd->cmnd[0]);
5632 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5633 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5634
	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
5639 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5640 ioasc = 0;
5641 ipr_trace;
5642 }
5643
5644 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5645 if (!ipr_is_naca_model(res))
5646 res->needs_sync_complete = 1;
5647
5648 LEAVE;
5649 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5650}
5651
5652
5653
5654
5655
5656
5657
5658
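/**
 * ipr_scan_finished - Report whether scan is done
 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time of the scan in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/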
5659static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5660{
5661 unsigned long lock_flags;
5662 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5663 int rc = 0;
5664
5665 spin_lock_irqsave(shost->host_lock, lock_flags);
5666 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5667 rc = 1;
5668 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5669 rc = 1;
5670 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5671 return rc;
5672}
5673
5674
5675
5676
5677
5678
5679
5680
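/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/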
5681static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5682{
5683 unsigned long flags;
5684 int rc;
5685 struct ipr_ioa_cfg *ioa_cfg;
5686
5687 ENTER;
5688
5689 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5690
5691 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5692 rc = ipr_cancel_op(scsi_cmd);
5693 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5694
5695 if (rc == SUCCESS)
5696 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5697 LEAVE;
5698 return rc;
5699}
5700
5701
5702
5703
5704
5705
5706
5707
5708
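/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/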
5709static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5710 u32 int_reg)
5711{
5712 irqreturn_t rc = IRQ_HANDLED;
5713 u32 int_mask_reg;
5714
5715 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5716 int_reg &= ~int_mask_reg;
5717
	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
5721 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5722 if (ioa_cfg->sis64) {
5723 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5724 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5725 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
				/* clear stage change */
5728 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5729 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5730 list_del(&ioa_cfg->reset_cmd->queue);
5731 del_timer(&ioa_cfg->reset_cmd->timer);
5732 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5733 return IRQ_HANDLED;
5734 }
5735 }
5736
5737 return IRQ_NONE;
5738 }
5739
5740 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
5742 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5743 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5744
5745 list_del(&ioa_cfg->reset_cmd->queue);
5746 del_timer(&ioa_cfg->reset_cmd->timer);
5747 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5748 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5749 if (ioa_cfg->clear_isr) {
5750 if (ipr_debug && printk_ratelimit())
5751 dev_err(&ioa_cfg->pdev->dev,
5752 "Spurious interrupt detected. 0x%08X\n", int_reg);
5753 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5754 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5755 return IRQ_NONE;
5756 }
5757 } else {
5758 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5759 ioa_cfg->ioa_unit_checked = 1;
5760 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5761 dev_err(&ioa_cfg->pdev->dev,
5762 "No Host RRQ. 0x%08X\n", int_reg);
5763 else
5764 dev_err(&ioa_cfg->pdev->dev,
5765 "Permanent IOA failure. 0x%08X\n", int_reg);
5766
5767 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5768 ioa_cfg->sdt_state = GET_DUMP;
5769
5770 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5771 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5772 }
5773
5774 return rc;
5775}
5776
5777
5778
5779
5780
5781
5782
5783
5784
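/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	various meanings depending on the caller/message
 *
 * Return value:
 *	none
 **/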
5785static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5786{
5787 ioa_cfg->errors_logged++;
5788 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5789
5790 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5791 ioa_cfg->sdt_state = GET_DUMP;
5792
5793 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5794}
5795
5796static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5797 struct list_head *doneq)
5798{
5799 u32 ioasc;
5800 u16 cmd_index;
5801 struct ipr_cmnd *ipr_cmd;
5802 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5803 int num_hrrq = 0;
5804
	/* If interrupts are disabled, ignore the interrupt */
5806 if (!hrr_queue->allow_interrupts)
5807 return 0;
5808
5809 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5810 hrr_queue->toggle_bit) {
5811
5812 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5813 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5814 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5815
5816 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5817 cmd_index < hrr_queue->min_cmd_id)) {
5818 ipr_isr_eh(ioa_cfg,
5819 "Invalid response handle from IOA: ",
5820 cmd_index);
5821 break;
5822 }
5823
5824 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5825 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5826
5827 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5828
5829 list_move_tail(&ipr_cmd->queue, doneq);
5830
5831 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5832 hrr_queue->hrrq_curr++;
5833 } else {
5834 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5835 hrr_queue->toggle_bit ^= 1u;
5836 }
5837 num_hrrq++;
5838 if (budget > 0 && num_hrrq >= budget)
5839 break;
5840 }
5841
5842 return num_hrrq;
5843}
5844
5845static int ipr_iopoll(struct irq_poll *iop, int budget)
5846{
5847 struct ipr_ioa_cfg *ioa_cfg;
5848 struct ipr_hrr_queue *hrrq;
5849 struct ipr_cmnd *ipr_cmd, *temp;
5850 unsigned long hrrq_flags;
5851 int completed_ops;
5852 LIST_HEAD(doneq);
5853
5854 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5855 ioa_cfg = hrrq->ioa_cfg;
5856
5857 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5858 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5859
5860 if (completed_ops < budget)
5861 irq_poll_complete(iop);
5862 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5863
5864 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5865 list_del(&ipr_cmd->queue);
5866 del_timer(&ipr_cmd->timer);
5867 ipr_cmd->fast_done(ipr_cmd);
5868 }
5869
5870 return completed_ops;
5871}
5872
5873
5874
5875
5876
5877
5878
5879
5880
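/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/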
5881static irqreturn_t ipr_isr(int irq, void *devp)
5882{
5883 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5884 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5885 unsigned long hrrq_flags = 0;
5886 u32 int_reg = 0;
5887 int num_hrrq = 0;
5888 int irq_none = 0;
5889 struct ipr_cmnd *ipr_cmd, *temp;
5890 irqreturn_t rc = IRQ_NONE;
5891 LIST_HEAD(doneq);
5892
5893 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5894
5895 if (!hrrq->allow_interrupts) {
5896 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5897 return IRQ_NONE;
5898 }
5899
5900 while (1) {
5901 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5902 rc = IRQ_HANDLED;
5903
5904 if (!ioa_cfg->clear_isr)
5905 break;
5906
			/* Clear the PCI interrupt */
5908 num_hrrq = 0;
5909 do {
5910 writel(IPR_PCII_HRRQ_UPDATED,
5911 ioa_cfg->regs.clr_interrupt_reg32);
5912 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5913 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5914 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5915
5916 } else if (rc == IRQ_NONE && irq_none == 0) {
5917 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5918 irq_none++;
5919 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5920 int_reg & IPR_PCII_HRRQ_UPDATED) {
5921 ipr_isr_eh(ioa_cfg,
5922 "Error clearing HRRQ: ", num_hrrq);
5923 rc = IRQ_HANDLED;
5924 break;
5925 } else
5926 break;
5927 }
5928
5929 if (unlikely(rc == IRQ_NONE))
5930 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5931
5932 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5933 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5934 list_del(&ipr_cmd->queue);
5935 del_timer(&ipr_cmd->timer);
5936 ipr_cmd->fast_done(ipr_cmd);
5937 }
5938 return rc;
5939}
5940
5941
5942
5943
5944
5945
5946
5947
5948
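/**
 * ipr_isr_mhrrq - Interrupt service routine for a multiple HRRQ
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/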
5949static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5950{
5951 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5952 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5953 unsigned long hrrq_flags = 0;
5954 struct ipr_cmnd *ipr_cmd, *temp;
5955 irqreturn_t rc = IRQ_NONE;
5956 LIST_HEAD(doneq);
5957
5958 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5959
	/* If interrupts are disabled, ignore the interrupt */
5961 if (!hrrq->allow_interrupts) {
5962 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5963 return IRQ_NONE;
5964 }
5965
5966 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5967 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5968 hrrq->toggle_bit) {
5969 irq_poll_sched(&hrrq->iopoll);
5970 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5971 return IRQ_HANDLED;
5972 }
5973 } else {
5974 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5975 hrrq->toggle_bit)
5976
5977 if (ipr_process_hrrq(hrrq, -1, &doneq))
5978 rc = IRQ_HANDLED;
5979 }
5980
5981 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5982
5983 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5984 list_del(&ipr_cmd->queue);
5985 del_timer(&ipr_cmd->timer);
5986 ipr_cmd->fast_done(ipr_cmd);
5987 }
5988 return rc;
5989}
5990
5991
5992
5993
5994
5995
5996
5997
5998
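/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/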
5999static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
6000 struct ipr_cmnd *ipr_cmd)
6001{
6002 int i, nseg;
6003 struct scatterlist *sg;
6004 u32 length;
6005 u32 ioadl_flags = 0;
6006 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6007 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6008 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6009
6010 length = scsi_bufflen(scsi_cmd);
6011 if (!length)
6012 return 0;
6013
6014 nseg = scsi_dma_map(scsi_cmd);
6015 if (nseg < 0) {
6016 if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6018 return -1;
6019 }
6020
6021 ipr_cmd->dma_use_sg = nseg;
6022
6023 ioarcb->data_transfer_length = cpu_to_be32(length);
6024 ioarcb->ioadl_len =
6025 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6026
6027 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6028 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6029 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6030 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
6031 ioadl_flags = IPR_IOADL_FLAGS_READ;
6032
6033 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6034 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
6035 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
6036 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
6037 }
6038
6039 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6040 return 0;
6041}
6042
6043
6044
6045
6046
6047
6048
6049
6050
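/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/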
6051static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
6052 struct ipr_cmnd *ipr_cmd)
6053{
6054 int i, nseg;
6055 struct scatterlist *sg;
6056 u32 length;
6057 u32 ioadl_flags = 0;
6058 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6059 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6060 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6061
6062 length = scsi_bufflen(scsi_cmd);
6063 if (!length)
6064 return 0;
6065
6066 nseg = scsi_dma_map(scsi_cmd);
6067 if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6069 return -1;
6070 }
6071
6072 ipr_cmd->dma_use_sg = nseg;
6073
6074 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6075 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6076 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6077 ioarcb->data_transfer_length = cpu_to_be32(length);
6078 ioarcb->ioadl_len =
6079 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6080 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6081 ioadl_flags = IPR_IOADL_FLAGS_READ;
6082 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6083 ioarcb->read_ioadl_len =
6084 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6085 }
6086
6087 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6088 ioadl = ioarcb->u.add_data.u.ioadl;
6089 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6090 offsetof(struct ipr_ioarcb, u.add_data));
6091 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6092 }
6093
6094 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6095 ioadl[i].flags_and_data_len =
6096 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6097 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6098 }
6099
6100 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6101 return 0;
6102}
6103
6104
6105
6106
6107
6108
6109
6110
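/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/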
6111static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
6112{
6113 u8 tag[2];
6114 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
6115
6116 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
6117 switch (tag[0]) {
6118 case MSG_SIMPLE_TAG:
6119 rc = IPR_FLAGS_LO_SIMPLE_TASK;
6120 break;
6121 case MSG_HEAD_TAG:
6122 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
6123 break;
6124 case MSG_ORDERED_TAG:
6125 rc = IPR_FLAGS_LO_ORDERED_TASK;
6126 break;
		}
6128 }
6129
6130 return rc;
6131}
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
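/**
 * __ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/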
6143static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6144{
6145 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6146 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6147 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6148
6149 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6150 scsi_cmd->result |= (DID_ERROR << 16);
6151 scmd_printk(KERN_ERR, scsi_cmd,
6152 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6153 } else {
6154 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6155 SCSI_SENSE_BUFFERSIZE);
6156 }
6157
6158 if (res) {
6159 if (!ipr_is_naca_model(res))
6160 res->needs_sync_complete = 1;
6161 res->in_erp = 0;
6162 }
6163 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6164 scsi_cmd->scsi_done(scsi_cmd);
6165 if (ipr_cmd->eh_comp)
6166 complete(ipr_cmd->eh_comp);
6167 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6168}
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
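/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function grabs the hrrq lock and calls __ipr_erp_done.
 *
 * Return value:
 *	nothing
 **/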
6180static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6181{
6182 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6183 unsigned long hrrq_flags;
6184
6185 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6186 __ipr_erp_done(ipr_cmd);
6187 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6188}
6189
6190
6191
6192
6193
6194
6195
6196
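/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/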
6197static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6198{
6199 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6200 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6201 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6202
6203 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6204 ioarcb->data_transfer_length = 0;
6205 ioarcb->read_data_transfer_length = 0;
6206 ioarcb->ioadl_len = 0;
6207 ioarcb->read_ioadl_len = 0;
6208 ioasa->hdr.ioasc = 0;
6209 ioasa->hdr.residual_data_len = 0;
6210
6211 if (ipr_cmd->ioa_cfg->sis64)
6212 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6213 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6214 else {
6215 ioarcb->write_ioadl_addr =
6216 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6217 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6218 }
6219}
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
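/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/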
6231static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6232{
6233 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6234 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6235
6236 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6237 __ipr_erp_done(ipr_cmd);
6238 return;
6239 }
6240
6241 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6242
6243 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6244 cmd_pkt->cdb[0] = REQUEST_SENSE;
6245 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6246 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6247 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6248 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6249
6250 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6251 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6252
6253 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6254 IPR_REQUEST_SENSE_TIMEOUT * 2);
6255}
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
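/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function grabs the hrrq lock and calls __ipr_erp_request_sense.
 *
 * Return value:
 *	nothing
 **/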
6267static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6268{
6269 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6270 unsigned long hrrq_flags;
6271
6272 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6273 __ipr_erp_request_sense(ipr_cmd);
6274 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6275}
6276
6277
6278
6279
6280
6281
6282
6283
6284
6285
6286
6287
6288
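/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/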
6289static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6290{
6291 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6292 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6293 struct ipr_cmd_pkt *cmd_pkt;
6294
6295 res->in_erp = 1;
6296
6297 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6298
6299 if (!scsi_get_tag_type(scsi_cmd->device)) {
6300 __ipr_erp_request_sense(ipr_cmd);
6301 return;
6302 }
6303
6304 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6305 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6306 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6307
6308 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6309 IPR_CANCEL_ALL_TIMEOUT);
6310}
6311
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321
6322
6323
6324
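/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/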
6325static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6326 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6327{
6328 int i;
6329 u16 data_len;
6330 u32 ioasc, fd_ioasc;
6331 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6332 __be32 *ioasa_data = (__be32 *)ioasa;
6333 int error_index;
6334
6335 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6336 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6337
6338 if (0 == ioasc)
6339 return;
6340
6341 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6342 return;
6343
6344 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6345 error_index = ipr_get_error(fd_ioasc);
6346 else
6347 error_index = ipr_get_error(ioasc);
6348
6349 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
6351 if (ioasa->hdr.ilid != 0)
6352 return;
6353
6354 if (!ipr_is_gscsi(res))
6355 return;
6356
6357 if (ipr_error_table[error_index].log_ioasa == 0)
6358 return;
6359 }
6360
6361 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6362
6363 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6364 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6365 data_len = sizeof(struct ipr_ioasa64);
6366 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6367 data_len = sizeof(struct ipr_ioasa);
6368
6369 ipr_err("IOASA Dump:\n");
6370
6371 for (i = 0; i < data_len / 4; i += 4) {
6372 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6373 be32_to_cpu(ioasa_data[i]),
6374 be32_to_cpu(ioasa_data[i+1]),
6375 be32_to_cpu(ioasa_data[i+2]),
6376 be32_to_cpu(ioasa_data[i+3]));
6377 }
6378}
6379
6380
6381
6382
6383
6384
6385
6386
6387
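/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/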
6388static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6389{
6390 u32 failing_lba;
6391 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6392 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6393 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6394 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6395
6396 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6397
6398 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6399 return;
6400
6401 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6402
6403 if (ipr_is_vset_device(res) &&
6404 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6405 ioasa->u.vset.failing_lba_hi != 0) {
6406 sense_buf[0] = 0x72;
6407 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6408 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6409 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6410
6411 sense_buf[7] = 12;
6412 sense_buf[8] = 0;
6413 sense_buf[9] = 0x0A;
6414 sense_buf[10] = 0x80;
6415
6416 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6417
6418 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6419 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6420 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6421 sense_buf[15] = failing_lba & 0x000000ff;
6422
6423 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6424
6425 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6426 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6427 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6428 sense_buf[19] = failing_lba & 0x000000ff;
6429 } else {
6430 sense_buf[0] = 0x70;
6431 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6432 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6433 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
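
		/* Illegal request */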
6436 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6437 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6438 sense_buf[7] = 10;
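
			/* IOARCB vs sense buffer */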
6441 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6442 sense_buf[15] = 0xC0;
6443 else
6444 sense_buf[15] = 0x80;
6445
6446 sense_buf[16] =
6447 ((IPR_FIELD_POINTER_MASK &
6448 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6449 sense_buf[17] =
6450 (IPR_FIELD_POINTER_MASK &
6451 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6452 } else {
6453 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6454 if (ipr_is_vset_device(res))
6455 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6456 else
6457 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6458
6459 sense_buf[0] |= 0x80;
6460 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6461 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6462 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6463 sense_buf[6] = failing_lba & 0x000000ff;
6464 }
6465
6466 sense_buf[7] = 6;
6467 }
6468 }
6469}
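
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/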
6481static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6482{
6483 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6484 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6485
6486 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6487 return 0;
6488
6489 if (ipr_cmd->ioa_cfg->sis64)
6490 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6491 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6492 SCSI_SENSE_BUFFERSIZE));
6493 else
6494 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6495 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6496 SCSI_SENSE_BUFFERSIZE));
6497 return 1;
6498}
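
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/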
6511static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6512 struct ipr_cmnd *ipr_cmd)
6513{
6514 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6515 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6516 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6517 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6518
6519 if (!res) {
6520 __ipr_scsi_eh_done(ipr_cmd);
6521 return;
6522 }
6523
6524 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6525 ipr_gen_sense(ipr_cmd);
6526
6527 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6528
6529 switch (masked_ioasc) {
6530 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6531 if (ipr_is_naca_model(res))
6532 scsi_cmd->result |= (DID_ABORT << 16);
6533 else
6534 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6535 break;
6536 case IPR_IOASC_IR_RESOURCE_HANDLE:
6537 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6538 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6539 break;
6540 case IPR_IOASC_HW_SEL_TIMEOUT:
6541 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6542 if (!ipr_is_naca_model(res))
6543 res->needs_sync_complete = 1;
6544 break;
6545 case IPR_IOASC_SYNC_REQUIRED:
6546 if (!res->in_erp)
6547 res->needs_sync_complete = 1;
6548 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6549 break;
6550 case IPR_IOASC_MED_DO_NOT_REALLOC:
6551 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
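		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid layer and upper layers handle it accordingly.
		 */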
6556 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6557 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6558 break;
6559 case IPR_IOASC_BUS_WAS_RESET:
6560 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
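		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */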
6565 if (!res->resetting_device)
6566 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6567 scsi_cmd->result |= (DID_ERROR << 16);
6568 if (!ipr_is_naca_model(res))
6569 res->needs_sync_complete = 1;
6570 break;
6571 case IPR_IOASC_HW_DEV_BUS_STATUS:
6572 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6573 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6574 if (!ipr_get_autosense(ipr_cmd)) {
6575 if (!ipr_is_naca_model(res)) {
6576 ipr_erp_cancel_all(ipr_cmd);
6577 return;
6578 }
6579 }
6580 }
6581 if (!ipr_is_naca_model(res))
6582 res->needs_sync_complete = 1;
6583 break;
6584 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6585 break;
6586 case IPR_IOASC_IR_NON_OPTIMIZED:
6587 if (res->raw_mode) {
6588 res->raw_mode = 0;
6589 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6590 } else
6591 scsi_cmd->result |= (DID_ERROR << 16);
6592 break;
6593 default:
6594 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6595 scsi_cmd->result |= (DID_ERROR << 16);
6596 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6597 res->needs_sync_complete = 1;
6598 break;
6599 }
6600
6601 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6602 scsi_cmd->scsi_done(scsi_cmd);
6603 if (ipr_cmd->eh_comp)
6604 complete(ipr_cmd->eh_comp);
6605 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6606}
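
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/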
6618static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6619{
6620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6621 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6622 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6623 unsigned long lock_flags;
6624
6625 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6626
6627 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6628 scsi_dma_unmap(scsi_cmd);
6629
6630 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6631 scsi_cmd->scsi_done(scsi_cmd);
6632 if (ipr_cmd->eh_comp)
6633 complete(ipr_cmd->eh_comp);
6634 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6635 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6636 } else {
6637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6638 spin_lock(&ipr_cmd->hrrq->_lock);
6639 ipr_erp_start(ioa_cfg, ipr_cmd);
6640 spin_unlock(&ipr_cmd->hrrq->_lock);
6641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6642 }
6643}
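
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/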
6657static int ipr_queuecommand(struct Scsi_Host *shost,
6658 struct scsi_cmnd *scsi_cmd)
6659{
6660 struct ipr_ioa_cfg *ioa_cfg;
6661 struct ipr_resource_entry *res;
6662 struct ipr_ioarcb *ioarcb;
6663 struct ipr_cmnd *ipr_cmd;
6664 unsigned long hrrq_flags, lock_flags;
6665 int rc;
6666 struct ipr_hrr_queue *hrrq;
6667 int hrrq_id;
6668
6669 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6670
6671 scsi_cmd->result = (DID_OK << 16);
6672 res = scsi_cmd->device->hostdata;
6673
6674 if (ipr_is_gata(res) && res->sata_port) {
6675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6676 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6678 return rc;
6679 }
6680
6681 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6682 hrrq = &ioa_cfg->hrrq[hrrq_id];
6683
6684 spin_lock_irqsave(hrrq->lock, hrrq_flags);
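	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */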
6690 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6691 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6692 return SCSI_MLQUEUE_HOST_BUSY;
6693 }
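
	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */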
6699 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6700 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6701 goto err_nodev;
6702 }
6703
6704 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6705 if (ipr_cmd == NULL) {
6706 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6707 return SCSI_MLQUEUE_HOST_BUSY;
6708 }
6709 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6710
6711 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6712 ioarcb = &ipr_cmd->ioarcb;
6713
6714 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6715 ipr_cmd->scsi_cmd = scsi_cmd;
6716 ipr_cmd->done = ipr_scsi_eh_done;
6717
6718 if (ipr_is_gscsi(res)) {
6719 if (scsi_cmd->underflow == 0)
6720 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6721
6722 if (res->reset_occurred) {
6723 res->reset_occurred = 0;
6724 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6725 }
6726 }
6727
6728 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6729 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6730
6731 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6732 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6733 }
6734
6735 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6736 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6737 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6738 }
6739 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6740 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6741
6742 if (scsi_cmd->underflow == 0)
6743 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6744 }
6745
6746 if (ioa_cfg->sis64)
6747 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6748 else
6749 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6750
6751 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6752 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6753 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6754 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6755 if (!rc)
6756 scsi_dma_unmap(scsi_cmd);
6757 return SCSI_MLQUEUE_HOST_BUSY;
6758 }
6759
6760 if (unlikely(hrrq->ioa_is_dead)) {
6761 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6762 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6763 scsi_dma_unmap(scsi_cmd);
6764 goto err_nodev;
6765 }
6766
6767 ioarcb->res_handle = res->res_handle;
6768 if (res->needs_sync_complete) {
6769 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6770 res->needs_sync_complete = 0;
6771 }
6772 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6773 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6774 ipr_send_command(ipr_cmd);
6775 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6776 return 0;
6777
6778err_nodev:
6779 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6780 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6781 scsi_cmd->result = (DID_NO_CONNECT << 16);
6782 scsi_cmd->scsi_done(scsi_cmd);
6783 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6784 return 0;
6785}
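
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/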
6796static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6797{
6798 struct ipr_resource_entry *res;
6799
6800 res = (struct ipr_resource_entry *)sdev->hostdata;
6801 if (res && ipr_is_gata(res)) {
6802 if (cmd == HDIO_GET_IDENTITY)
6803 return -ENOTTY;
6804 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6805 }
6806
6807 return -EINVAL;
6808}
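
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/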
6817static const char *ipr_ioa_info(struct Scsi_Host *host)
6818{
6819 static char buffer[512];
6820 struct ipr_ioa_cfg *ioa_cfg;
6821 unsigned long lock_flags = 0;
6822
6823 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6824
6825 spin_lock_irqsave(host->host_lock, lock_flags);
	snprintf(buffer, sizeof(buffer), "IBM %X Storage Adapter", ioa_cfg->type);
6827 spin_unlock_irqrestore(host->host_lock, lock_flags);
6828
6829 return buffer;
6830}
6831
6832static struct scsi_host_template driver_template = {
6833 .module = THIS_MODULE,
6834 .name = "IPR",
6835 .info = ipr_ioa_info,
6836 .ioctl = ipr_ioctl,
6837 .queuecommand = ipr_queuecommand,
6838 .eh_abort_handler = ipr_eh_abort,
6839 .eh_device_reset_handler = ipr_eh_dev_reset,
6840 .eh_host_reset_handler = ipr_eh_host_reset,
6841 .slave_alloc = ipr_slave_alloc,
6842 .slave_configure = ipr_slave_configure,
6843 .slave_destroy = ipr_slave_destroy,
6844 .scan_finished = ipr_scan_finished,
6845 .target_alloc = ipr_target_alloc,
6846 .target_destroy = ipr_target_destroy,
6847 .change_queue_depth = ipr_change_queue_depth,
6848 .change_queue_type = ipr_change_queue_type,
6849 .bios_param = ipr_biosparam,
6850 .can_queue = IPR_MAX_COMMANDS,
6851 .this_id = -1,
6852 .sg_tablesize = IPR_MAX_SGLIST,
6853 .max_sectors = IPR_IOA_MAX_SECTORS,
6854 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6855 .use_clustering = ENABLE_CLUSTERING,
6856 .shost_attrs = ipr_ioa_attrs,
6857 .sdev_attrs = ipr_dev_attrs,
6858 .proc_name = IPR_NAME,
6859};
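
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 **/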
6866static void ipr_ata_phy_reset(struct ata_port *ap)
6867{
6868 unsigned long flags;
6869 struct ipr_sata_port *sata_port = ap->private_data;
6870 struct ipr_resource_entry *res = sata_port->res;
6871 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6872 int rc;
6873
6874 ENTER;
6875 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6876 while (ioa_cfg->in_reset_reload) {
6877 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6878 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6879 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6880 }
6881
6882 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6883 goto out_unlock;
6884
6885 rc = ipr_device_reset(ioa_cfg, res);
6886
6887 if (rc) {
6888 ap->link.device[0].class = ATA_DEV_NONE;
6889 goto out_unlock;
6890 }
6891
6892 ap->link.device[0].class = res->ata_class;
6893 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6894 ap->link.device[0].class = ATA_DEV_NONE;
6895
6896out_unlock:
6897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6898 LEAVE;
6899}
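
/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/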
6908static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6909{
6910 struct ipr_sata_port *sata_port = qc->ap->private_data;
6911 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6912 struct ipr_cmnd *ipr_cmd;
6913 struct ipr_hrr_queue *hrrq;
6914 unsigned long flags;
6915
6916 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6917 while (ioa_cfg->in_reset_reload) {
6918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6919 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6920 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6921 }
6922
6923 for_each_hrrq(hrrq, ioa_cfg) {
6924 spin_lock(&hrrq->_lock);
6925 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6926 if (ipr_cmd->qc == qc) {
6927 ipr_device_reset(ioa_cfg, sata_port->res);
6928 break;
6929 }
6930 }
6931 spin_unlock(&hrrq->_lock);
6932 }
6933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6934}
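
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/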
6944static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6945 struct ata_taskfile *tf)
6946{
6947 regs->feature = tf->feature;
6948 regs->nsect = tf->nsect;
6949 regs->lbal = tf->lbal;
6950 regs->lbam = tf->lbam;
6951 regs->lbah = tf->lbah;
6952 regs->device = tf->device;
6953 regs->command = tf->command;
6954 regs->hob_feature = tf->hob_feature;
6955 regs->hob_nsect = tf->hob_nsect;
6956 regs->hob_lbal = tf->hob_lbal;
6957 regs->hob_lbam = tf->hob_lbam;
6958 regs->hob_lbah = tf->hob_lbah;
6959 regs->ctl = tf->ctl;
6960}
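
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/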
6972static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6973{
6974 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6975 struct ata_queued_cmd *qc = ipr_cmd->qc;
6976 struct ipr_sata_port *sata_port = qc->ap->private_data;
6977 struct ipr_resource_entry *res = sata_port->res;
6978 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6979
6980 spin_lock(&ipr_cmd->hrrq->_lock);
6981 if (ipr_cmd->ioa_cfg->sis64)
6982 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6983 sizeof(struct ipr_ioasa_gata));
6984 else
6985 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6986 sizeof(struct ipr_ioasa_gata));
6987 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6988
6989 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6990 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6991
6992 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6993 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6994 else
6995 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6996 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6997 spin_unlock(&ipr_cmd->hrrq->_lock);
6998 ata_qc_complete(qc);
6999}
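
/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/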
7007static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
7008 struct ata_queued_cmd *qc)
7009{
7010 u32 ioadl_flags = 0;
7011 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7012 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
7013 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
7014 int len = qc->nbytes;
7015 struct scatterlist *sg;
7016 unsigned int si;
7017 dma_addr_t dma_addr = ipr_cmd->dma_addr;
7018
7019 if (len == 0)
7020 return;
7021
7022 if (qc->dma_dir == DMA_TO_DEVICE) {
7023 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
7024 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7025 } else if (qc->dma_dir == DMA_FROM_DEVICE)
7026 ioadl_flags = IPR_IOADL_FLAGS_READ;
7027
7028 ioarcb->data_transfer_length = cpu_to_be32(len);
7029 ioarcb->ioadl_len =
7030 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
7031 ioarcb->u.sis64_addr_data.data_ioadl_addr =
7032 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
7033
7034 for_each_sg(qc->sg, sg, qc->n_elem, si) {
7035 ioadl64->flags = cpu_to_be32(ioadl_flags);
7036 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
7037 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
7038
7039 last_ioadl64 = ioadl64;
7040 ioadl64++;
7041 }
7042
7043 if (likely(last_ioadl64))
7044 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
7045}
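
/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/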
7053static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
7054 struct ata_queued_cmd *qc)
7055{
7056 u32 ioadl_flags = 0;
7057 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7058 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
7059 struct ipr_ioadl_desc *last_ioadl = NULL;
7060 int len = qc->nbytes;
7061 struct scatterlist *sg;
7062 unsigned int si;
7063
7064 if (len == 0)
7065 return;
7066
7067 if (qc->dma_dir == DMA_TO_DEVICE) {
7068 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
7069 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7070 ioarcb->data_transfer_length = cpu_to_be32(len);
7071 ioarcb->ioadl_len =
7072 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
7073 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
7074 ioadl_flags = IPR_IOADL_FLAGS_READ;
7075 ioarcb->read_data_transfer_length = cpu_to_be32(len);
7076 ioarcb->read_ioadl_len =
7077 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
7078 }
7079
7080 for_each_sg(qc->sg, sg, qc->n_elem, si) {
7081 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
7082 ioadl->address = cpu_to_be32(sg_dma_address(sg));
7083
7084 last_ioadl = ioadl;
7085 ioadl++;
7086 }
7087
7088 if (likely(last_ioadl))
7089 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
7090}
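
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 *
 * Return value:
 *	0 if we can handle the command else ATA_DEFER_LINK
 **/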
7099static int ipr_qc_defer(struct ata_queued_cmd *qc)
7100{
7101 struct ata_port *ap = qc->ap;
7102 struct ipr_sata_port *sata_port = ap->private_data;
7103 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7104 struct ipr_cmnd *ipr_cmd;
7105 struct ipr_hrr_queue *hrrq;
7106 int hrrq_id;
7107
7108 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7109 hrrq = &ioa_cfg->hrrq[hrrq_id];
7110
7111 qc->lldd_task = NULL;
7112 spin_lock(&hrrq->_lock);
7113 if (unlikely(hrrq->ioa_is_dead)) {
7114 spin_unlock(&hrrq->_lock);
7115 return 0;
7116 }
7117
7118 if (unlikely(!hrrq->allow_cmds)) {
7119 spin_unlock(&hrrq->_lock);
7120 return ATA_DEFER_LINK;
7121 }
7122
7123 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7124 if (ipr_cmd == NULL) {
7125 spin_unlock(&hrrq->_lock);
7126 return ATA_DEFER_LINK;
7127 }
7128
7129 qc->lldd_task = ipr_cmd;
7130 spin_unlock(&hrrq->_lock);
7131 return 0;
7132}
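
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/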
7141static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7142{
7143 struct ata_port *ap = qc->ap;
7144 struct ipr_sata_port *sata_port = ap->private_data;
7145 struct ipr_resource_entry *res = sata_port->res;
7146 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7147 struct ipr_cmnd *ipr_cmd;
7148 struct ipr_ioarcb *ioarcb;
7149 struct ipr_ioarcb_ata_regs *regs;
7150
7151 if (qc->lldd_task == NULL)
7152 ipr_qc_defer(qc);
7153
7154 ipr_cmd = qc->lldd_task;
7155 if (ipr_cmd == NULL)
7156 return AC_ERR_SYSTEM;
7157
7158 qc->lldd_task = NULL;
7159 spin_lock(&ipr_cmd->hrrq->_lock);
7160 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7161 ipr_cmd->hrrq->ioa_is_dead)) {
7162 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7163 spin_unlock(&ipr_cmd->hrrq->_lock);
7164 return AC_ERR_SYSTEM;
7165 }
7166
7167 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7168 ioarcb = &ipr_cmd->ioarcb;
7169
7170 if (ioa_cfg->sis64) {
7171 regs = &ipr_cmd->i.ata_ioadl.regs;
7172 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7173 } else
7174 regs = &ioarcb->u.add_data.u.regs;
7175
7176 memset(regs, 0, sizeof(*regs));
7177 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7178
7179 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7180 ipr_cmd->qc = qc;
7181 ipr_cmd->done = ipr_sata_done;
7182 ipr_cmd->ioarcb.res_handle = res->res_handle;
7183 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7184 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7185 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7186 ipr_cmd->dma_use_sg = qc->n_elem;
7187
7188 if (ioa_cfg->sis64)
7189 ipr_build_ata_ioadl64(ipr_cmd, qc);
7190 else
7191 ipr_build_ata_ioadl(ipr_cmd, qc);
7192
7193 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7194 ipr_copy_sata_tf(regs, &qc->tf);
7195 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7196 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7197
7198 switch (qc->tf.protocol) {
7199 case ATA_PROT_NODATA:
7200 case ATA_PROT_PIO:
7201 break;
7202
7203 case ATA_PROT_DMA:
7204 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7205 break;
7206
7207 case ATAPI_PROT_PIO:
7208 case ATAPI_PROT_NODATA:
7209 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7210 break;
7211
7212 case ATAPI_PROT_DMA:
7213 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7214 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7215 break;
7216
7217 default:
7218 WARN_ON(1);
7219 spin_unlock(&ipr_cmd->hrrq->_lock);
7220 return AC_ERR_INVALID;
7221 }
7222
7223 ipr_send_command(ipr_cmd);
7224 spin_unlock(&ipr_cmd->hrrq->_lock);
7225
7226 return 0;
7227}
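
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 *	true
 **/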
7236static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7237{
7238 struct ipr_sata_port *sata_port = qc->ap->private_data;
7239 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7240 struct ata_taskfile *tf = &qc->result_tf;
7241
7242 tf->feature = g->error;
7243 tf->nsect = g->nsect;
7244 tf->lbal = g->lbal;
7245 tf->lbam = g->lbam;
7246 tf->lbah = g->lbah;
7247 tf->device = g->device;
7248 tf->command = g->status;
7249 tf->hob_nsect = g->hob_nsect;
7250 tf->hob_lbal = g->hob_lbal;
7251 tf->hob_lbam = g->hob_lbam;
7252 tf->hob_lbah = g->hob_lbah;
7253
7254 return true;
7255}
7256
7257static struct ata_port_operations ipr_sata_ops = {
7258 .phy_reset = ipr_ata_phy_reset,
7259 .hardreset = ipr_sata_reset,
7260 .post_internal_cmd = ipr_ata_post_internal,
7261 .qc_prep = ata_noop_qc_prep,
7262 .qc_defer = ipr_qc_defer,
7263 .qc_issue = ipr_qc_issue,
7264 .qc_fill_rtf = ipr_qc_fill_rtf,
7265 .port_start = ata_sas_port_start,
7266 .port_stop = ata_sas_port_stop
7267};
7268
7269static struct ata_port_info sata_port_info = {
7270 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7271 ATA_FLAG_SAS_HOST,
7272 .pio_mask = ATA_PIO4_ONLY,
7273 .mwdma_mask = ATA_MWDMA2,
7274 .udma_mask = ATA_UDMA6,
7275 .port_ops = &ipr_sata_ops
7276};
7277
7278#ifdef CONFIG_PPC_PSERIES
7279static const u16 ipr_blocked_processors[] = {
7280 PVR_NORTHSTAR,
7281 PVR_PULSAR,
7282 PVR_POWER4,
7283 PVR_ICESTAR,
7284 PVR_SSTAR,
7285 PVR_POWER4p,
7286 PVR_630,
7287 PVR_630p
7288};
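
/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/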
7301static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7302{
7303 int i;
7304
7305 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7306 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7307 if (pvr_version_is(ipr_blocked_processors[i]))
7308 return 1;
7309 }
7310 }
7311 return 0;
7312}
7313#else
7314#define ipr_invalid_adapter(ioa_cfg) 0
7315#endif
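
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/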
7327static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7328{
7329 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7330 int i;
7331
7332 ENTER;
7333 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7334 ipr_trace;
7335 ioa_cfg->scsi_unblock = 1;
7336 schedule_work(&ioa_cfg->work_q);
7337 }
7338
7339 ioa_cfg->in_reset_reload = 0;
7340 ioa_cfg->reset_retries = 0;
7341 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7342 spin_lock(&ioa_cfg->hrrq[i]._lock);
7343 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7344 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7345 }
7346 wmb();
7347
7348 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7349 wake_up_all(&ioa_cfg->reset_wait_q);
7350 LEAVE;
7351
7352 return IPR_RC_JOB_RETURN;
7353}
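
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/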
7366static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7367{
7368 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7369 struct ipr_resource_entry *res;
7370 int j;
7371
7372 ENTER;
7373 ioa_cfg->in_reset_reload = 0;
7374 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7375 spin_lock(&ioa_cfg->hrrq[j]._lock);
7376 ioa_cfg->hrrq[j].allow_cmds = 1;
7377 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7378 }
7379 wmb();
7380 ioa_cfg->reset_cmd = NULL;
7381 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7382
7383 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7384 if (res->add_to_ml || res->del_from_ml) {
7385 ipr_trace;
7386 break;
7387 }
7388 }
7389 schedule_work(&ioa_cfg->work_q);
7390
7391 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7392 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7393 if (j < IPR_NUM_LOG_HCAMS)
7394 ipr_send_hcam(ioa_cfg,
7395 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7396 ioa_cfg->hostrcb[j]);
7397 else
7398 ipr_send_hcam(ioa_cfg,
7399 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7400 ioa_cfg->hostrcb[j]);
7401 }
7402
7403 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7404 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7405
7406 ioa_cfg->reset_retries = 0;
7407 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7408 wake_up_all(&ioa_cfg->reset_wait_q);
7409
7410 ioa_cfg->scsi_unblock = 1;
7411 schedule_work(&ioa_cfg->work_q);
7412 LEAVE;
7413 return IPR_RC_JOB_RETURN;
7414}
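
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 *	none
 **/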
7424static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7425 struct ipr_std_inq_vpids *vpids)
7426{
7427 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7428 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7429 supported_dev->num_records = 1;
7430 supported_dev->data_length =
7431 cpu_to_be16(sizeof(struct ipr_supported_device));
7432 supported_dev->reserved = 0;
7433}
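
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/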
7444static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7445{
7446 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7447 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7448 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7449 struct ipr_resource_entry *res = ipr_cmd->u.res;
7450
7451 ipr_cmd->job_step = ipr_ioa_reset_done;
7452
7453 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7454 if (!ipr_is_scsi_disk(res))
7455 continue;
7456
7457 ipr_cmd->u.res = res;
7458 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7459
7460 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7461 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7462 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7463
7464 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7465 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7466 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7467 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7468
7469 ipr_init_ioadl(ipr_cmd,
7470 ioa_cfg->vpd_cbs_dma +
7471 offsetof(struct ipr_misc_cbs, supp_dev),
7472 sizeof(struct ipr_supported_device),
7473 IPR_IOADL_FLAGS_WRITE_LAST);
7474
7475 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7476 IPR_SET_SUP_DEVICE_TIMEOUT);
7477
7478 if (!ioa_cfg->sis64)
7479 ipr_cmd->job_step = ipr_set_supported_devs;
7480 LEAVE;
7481 return IPR_RC_JOB_RETURN;
7482 }
7483
7484 LEAVE;
7485 return IPR_RC_JOB_CONTINUE;
7486}
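
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/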
7497static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7498 u32 page_code, u32 len)
7499{
7500 struct ipr_mode_page_hdr *mode_hdr;
7501 u32 page_length;
7502 u32 length;
7503
7504 if (!mode_pages || (mode_pages->hdr.length == 0))
7505 return NULL;
7506
7507 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7508 mode_hdr = (struct ipr_mode_page_hdr *)
7509 (mode_pages->data + mode_pages->hdr.block_desc_len);
7510
7511 while (length) {
7512 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7513 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7514 return mode_hdr;
7515 break;
7516 } else {
7517 page_length = (sizeof(struct ipr_mode_page_hdr) +
7518 mode_hdr->page_length);
7519 length -= page_length;
7520 mode_hdr = (struct ipr_mode_page_hdr *)
7521 ((unsigned long)mode_hdr + page_length);
7522 }
7523 }
7524 return NULL;
7525}
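
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 *	nothing
 **/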
7537static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7538 struct ipr_mode_pages *mode_pages)
7539{
7540 int i;
7541 int entry_length;
7542 struct ipr_dev_bus_entry *bus;
7543 struct ipr_mode_page28 *mode_page;
7544
	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));
	/* ipr_get_mode_page returns NULL if the page is missing or short */
	if (!mode_page)
		return;

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;
7551
7552 for (i = 0; i < mode_page->num_entries; i++) {
7553 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7554 dev_err(&ioa_cfg->pdev->dev,
7555 "Term power is absent on scsi bus %d\n",
7556 bus->res_addr.bus);
7557 }
7558
7559 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7560 }
7561}
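
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 *	none
 **/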
7574static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7575{
7576 u32 max_xfer_rate;
7577 int i;
7578
7579 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7580 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7581 ioa_cfg->bus_attr[i].bus_width);
7582
7583 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7584 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7585 }
7586}
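
/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 *	none
 **/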
7598static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7599 struct ipr_mode_pages *mode_pages)
7600{
7601 int i, entry_length;
7602 struct ipr_dev_bus_entry *bus;
7603 struct ipr_bus_attributes *bus_attr;
7604 struct ipr_mode_page28 *mode_page;
7605
	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));
	/* ipr_get_mode_page returns NULL if the page is missing or short */
	if (!mode_page)
		return;

	entry_length = mode_page->entry_length;
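
	/* Loop for each device bus entry */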
7612 for (i = 0, bus = mode_page->bus;
7613 i < mode_page->num_entries;
7614 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7615 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7616 dev_err(&ioa_cfg->pdev->dev,
7617 "Invalid resource address reported: 0x%08X\n",
7618 IPR_GET_PHYS_LOC(bus->res_addr));
7619 continue;
7620 }
7621
7622 bus_attr = &ioa_cfg->bus_attr[i];
7623 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7624 bus->bus_width = bus_attr->bus_width;
7625 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7626 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7627 if (bus_attr->qas_enabled)
7628 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7629 else
7630 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7631 }
7632}
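
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/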
7645static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7646 __be32 res_handle, u8 parm,
7647 dma_addr_t dma_addr, u8 xfer_len)
7648{
7649 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7650
7651 ioarcb->res_handle = res_handle;
7652 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7654 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7655 ioarcb->cmd_pkt.cdb[1] = parm;
7656 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7657
7658 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7659}
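
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/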
7671static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7672{
7673 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7674 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7675 int length;
7676
7677 ENTER;
7678 ipr_scsi_bus_speed_limit(ioa_cfg);
7679 ipr_check_term_power(ioa_cfg, mode_pages);
7680 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7681 length = mode_pages->hdr.length + 1;
7682 mode_pages->hdr.length = 0;
7683
7684 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7685 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7686 length);
7687
7688 ipr_cmd->job_step = ipr_set_supported_devs;
7689 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7690 struct ipr_resource_entry, queue);
7691 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7692
7693 LEAVE;
7694 return IPR_RC_JOB_RETURN;
7695}
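
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource entry struct
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 *	none
 **/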
7708static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7709 __be32 res_handle,
7710 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7711{
7712 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7713
7714 ioarcb->res_handle = res_handle;
7715 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7716 ioarcb->cmd_pkt.cdb[2] = parm;
7717 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7718 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7719
7720 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7721}
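
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/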
7732static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7733{
7734 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7735 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7736
7737 dev_err(&ioa_cfg->pdev->dev,
7738 "0x%02X failed with IOASC: 0x%08X\n",
7739 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7740
7741 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7742 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7743 return IPR_RC_JOB_RETURN;
7744}
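
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/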
7756static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7757{
7758 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7759 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7760
7761 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7762 ipr_cmd->job_step = ipr_set_supported_devs;
7763 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7764 struct ipr_resource_entry, queue);
7765 return IPR_RC_JOB_CONTINUE;
7766 }
7767
7768 return ipr_reset_cmd_failed(ipr_cmd);
7769}
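
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/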
7781static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7782{
7783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7784
7785 ENTER;
7786 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7787 0x28, ioa_cfg->vpd_cbs_dma +
7788 offsetof(struct ipr_misc_cbs, mode_pages),
7789 sizeof(struct ipr_mode_pages));
7790
7791 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7792 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7793
7794 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7795
7796 LEAVE;
7797 return IPR_RC_JOB_RETURN;
7798}
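
/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/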
7809static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7810{
7811 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7812 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7813 struct ipr_mode_page24 *mode_page;
7814 int length;
7815
7816 ENTER;
7817 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7818 sizeof(struct ipr_mode_page24));
7819
7820 if (mode_page)
7821 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7822
7823 length = mode_pages->hdr.length + 1;
7824 mode_pages->hdr.length = 0;
7825
7826 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7827 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7828 length);
7829
7830 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7831 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7832
7833 LEAVE;
7834 return IPR_RC_JOB_RETURN;
7835}
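
/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/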
7847static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7848{
7849 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7850
7851 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7852 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7853 return IPR_RC_JOB_CONTINUE;
7854 }
7855
7856 return ipr_reset_cmd_failed(ipr_cmd);
7857}
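
/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/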
7869static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7870{
7871 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7872
7873 ENTER;
7874 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7875 0x24, ioa_cfg->vpd_cbs_dma +
7876 offsetof(struct ipr_misc_cbs, mode_pages),
7877 sizeof(struct ipr_mode_pages));
7878
7879 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7880 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7881
7882 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7883
7884 LEAVE;
7885 return IPR_RC_JOB_RETURN;
7886}
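
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/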
7900static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7901{
7902 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7903 struct ipr_resource_entry *res, *temp;
7904 struct ipr_config_table_entry_wrapper cfgtew;
7905 int entries, found, flag, i;
7906 LIST_HEAD(old_res);
7907
7908 ENTER;
7909 if (ioa_cfg->sis64)
7910 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7911 else
7912 flag = ioa_cfg->u.cfg_table->hdr.flags;
7913
7914 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7915 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7916
7917 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7918 list_move_tail(&res->queue, &old_res);
7919
7920 if (ioa_cfg->sis64)
7921 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7922 else
7923 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7924
7925 for (i = 0; i < entries; i++) {
7926 if (ioa_cfg->sis64)
7927 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7928 else
7929 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7930 found = 0;
7931
7932 list_for_each_entry_safe(res, temp, &old_res, queue) {
7933 if (ipr_is_same_device(res, &cfgtew)) {
7934 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7935 found = 1;
7936 break;
7937 }
7938 }
7939
7940 if (!found) {
7941 if (list_empty(&ioa_cfg->free_res_q)) {
7942 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7943 break;
7944 }
7945
7946 found = 1;
7947 res = list_entry(ioa_cfg->free_res_q.next,
7948 struct ipr_resource_entry, queue);
7949 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7950 ipr_init_res_entry(res, &cfgtew);
7951 res->add_to_ml = 1;
7952 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7953 res->sdev->allow_restart = 1;
7954
7955 if (found)
7956 ipr_update_res_entry(res, &cfgtew);
7957 }
7958
7959 list_for_each_entry_safe(res, temp, &old_res, queue) {
7960 if (res->sdev) {
7961 res->del_from_ml = 1;
7962 res->res_handle = IPR_INVALID_RES_HANDLE;
7963 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7964 }
7965 }
7966
7967 list_for_each_entry_safe(res, temp, &old_res, queue) {
7968 ipr_clear_res_target(res);
7969 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7970 }
7971
7972 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7973 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7974 else
7975 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7976
7977 LEAVE;
7978 return IPR_RC_JOB_CONTINUE;
7979}
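
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/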
7991static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7992{
7993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7994 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7995 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7996 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7997
7998 ENTER;
7999 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
8000 ioa_cfg->dual_raid = 1;
8001 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
8002 ucode_vpd->major_release, ucode_vpd->card_type,
8003 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
8004 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8005 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8006
8007 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
8008 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
8009 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
8010 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
8011
8012 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
8013 IPR_IOADL_FLAGS_READ_LAST);
8014
8015 ipr_cmd->job_step = ipr_init_res_table;
8016
8017 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8018
8019 LEAVE;
8020 return IPR_RC_JOB_RETURN;
8021}
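
/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd:	ipr command struct
 *
 * Treats an unsupported request type as a no-op; any other failure
 * triggers an adapter reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/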
8023static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
8024{
8025 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8026
8027 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
8028 return IPR_RC_JOB_CONTINUE;
8029
8030 return ipr_reset_cmd_failed(ipr_cmd);
8031}
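
/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @sa_code:	service action code
 *
 * Return value:
 *	none
 **/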
8033static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
8034 __be32 res_handle, u8 sa_code)
8035{
8036 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8037
8038 ioarcb->res_handle = res_handle;
8039 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
8040 ioarcb->cmd_pkt.cdb[1] = sa_code;
8041 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8042}
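
/**
 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
 * action
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/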
8051static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
8052{
8053 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8054 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8055 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8056
8057 ENTER;
8058
8059 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
8060
8061 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
8062 ipr_build_ioa_service_action(ipr_cmd,
8063 cpu_to_be32(IPR_IOA_RES_HANDLE),
8064 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
8065
8066 ioarcb->cmd_pkt.cdb[2] = 0x40;
8067
8068 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
8069 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8070 IPR_SET_SUP_DEVICE_TIMEOUT);
8071
8072 LEAVE;
8073 return IPR_RC_JOB_RETURN;
8074 }
8075
8076 LEAVE;
8077 return IPR_RC_JOB_CONTINUE;
8078}
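
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	flags to send (e.g. 1 = EVPD)
 * @page:	page code
 * @dma_addr:	DMA address
 * @xfer_len:	transfer data length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/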
8089static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
8090 dma_addr_t dma_addr, u8 xfer_len)
8091{
8092 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8093
8094 ENTER;
8095 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8096 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8097
8098 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8099 ioarcb->cmd_pkt.cdb[1] = flags;
8100 ioarcb->cmd_pkt.cdb[2] = page;
8101 ioarcb->cmd_pkt.cdb[4] = xfer_len;
8102
8103 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8104
8105 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8106 LEAVE;
8107}
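
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/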
8119static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8120{
8121 int i;
8122
8123 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8124 if (page0->page[i] == page)
8125 return 1;
8126
8127 return 0;
8128}
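
/**
 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xC4 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/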
8140static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8141{
8142 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8143 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8144 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8145
8146 ENTER;
8147 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8148 memset(pageC4, 0, sizeof(*pageC4));
8149
8150 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8151 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8152 (ioa_cfg->vpd_cbs_dma
8153 + offsetof(struct ipr_misc_cbs,
8154 pageC4_data)),
8155 sizeof(struct ipr_inquiry_pageC4));
8156 return IPR_RC_JOB_RETURN;
8157 }
8158
8159 LEAVE;
8160 return IPR_RC_JOB_CONTINUE;
8161}
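
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/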
8173static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8174{
8175 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8176 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8177 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8178
8179 ENTER;
8180 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8181 memset(cap, 0, sizeof(*cap));
8182
8183 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8184 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8185 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8186 sizeof(struct ipr_inquiry_cap));
8187 return IPR_RC_JOB_RETURN;
8188 }
8189
8190 LEAVE;
8191 return IPR_RC_JOB_CONTINUE;
8192}
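
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/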
8204static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8205{
8206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8207
8208 ENTER;
8209
8210 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8211
8212 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8213 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8214 sizeof(struct ipr_inquiry_page3));
8215
8216 LEAVE;
8217 return IPR_RC_JOB_RETURN;
8218}
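
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/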
8230static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8231{
8232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8233 char type[5];
8234
8235 ENTER;
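
	/* Grab the type out of the VPD and store it away */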
8238 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8239 type[4] = '\0';
8240 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8241
8242 if (ipr_invalid_adapter(ioa_cfg)) {
8243 dev_err(&ioa_cfg->pdev->dev,
8244 "Adapter not supported in this hardware configuration.\n");
8245
8246 if (!ipr_testmode) {
8247 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8248 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8249 list_add_tail(&ipr_cmd->queue,
8250 &ioa_cfg->hrrq->hrrq_free_q);
8251 return IPR_RC_JOB_RETURN;
8252 }
8253 }
8254
8255 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8256
8257 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8258 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8259 sizeof(struct ipr_inquiry_page0));
8260
8261 LEAVE;
8262 return IPR_RC_JOB_RETURN;
8263}
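
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/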
8274static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8275{
8276 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8277
8278 ENTER;
8279 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8280
8281 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8282 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8283 sizeof(struct ipr_ioa_vpd));
8284
8285 LEAVE;
8286 return IPR_RC_JOB_RETURN;
8287}
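
/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/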
8299static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8300{
8301 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8302 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8303 struct ipr_hrr_queue *hrrq;
8304
8305 ENTER;
8306 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8307 if (ioa_cfg->identify_hrrq_index == 0)
8308 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8309
8310 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8311 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8312
8313 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8314 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8315
8316 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8317 if (ioa_cfg->sis64)
8318 ioarcb->cmd_pkt.cdb[1] = 0x1;
8319
8320 if (ioa_cfg->nvectors == 1)
8321 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8322 else
8323 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8324
8325 ioarcb->cmd_pkt.cdb[2] =
8326 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8327 ioarcb->cmd_pkt.cdb[3] =
8328 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8329 ioarcb->cmd_pkt.cdb[4] =
8330 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8331 ioarcb->cmd_pkt.cdb[5] =
8332 ((u64) hrrq->host_rrq_dma) & 0xff;
8333 ioarcb->cmd_pkt.cdb[7] =
8334 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8335 ioarcb->cmd_pkt.cdb[8] =
8336 (sizeof(u32) * hrrq->size) & 0xff;
8337
8338 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8339 ioarcb->cmd_pkt.cdb[9] =
8340 ioa_cfg->identify_hrrq_index;
8341
8342 if (ioa_cfg->sis64) {
8343 ioarcb->cmd_pkt.cdb[10] =
8344 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8345 ioarcb->cmd_pkt.cdb[11] =
8346 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8347 ioarcb->cmd_pkt.cdb[12] =
8348 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8349 ioarcb->cmd_pkt.cdb[13] =
8350 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8351 }
8352
8353 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8354 ioarcb->cmd_pkt.cdb[14] =
8355 ioa_cfg->identify_hrrq_index;
8356
8357 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8358 IPR_INTERNAL_TIMEOUT);
8359
8360 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8361 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8362
8363 LEAVE;
8364 return IPR_RC_JOB_RETURN;
8365 }
8366
8367 LEAVE;
8368 return IPR_RC_JOB_CONTINUE;
8369}
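
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/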
8384static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8385{
8386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8387 unsigned long lock_flags = 0;
8388
8389 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8390
8391 if (ioa_cfg->reset_cmd == ipr_cmd) {
8392 list_del(&ipr_cmd->queue);
8393 ipr_cmd->done(ipr_cmd);
8394 }
8395
8396 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8397}
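
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/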
8413static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8414 unsigned long timeout)
8415{
8416
8417 ENTER;
8418 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8419 ipr_cmd->done = ipr_reset_ioa_job;
8420
8421 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8422 ipr_cmd->timer.expires = jiffies + timeout;
8423 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8424 add_timer(&ipr_cmd->timer);
8425}
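
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	none
 **/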
8434static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8435{
8436 struct ipr_hrr_queue *hrrq;
8437
8438 for_each_hrrq(hrrq, ioa_cfg) {
8439 spin_lock(&hrrq->_lock);
8440 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
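
		/* Initialize Host RRQ pointers */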
8443 hrrq->hrrq_start = hrrq->host_rrq;
8444 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8445 hrrq->hrrq_curr = hrrq->hrrq_start;
8446 hrrq->toggle_bit = 1;
8447 spin_unlock(&hrrq->_lock);
8448 }
8449 wmb();
8450
8451 ioa_cfg->identify_hrrq_index = 0;
8452 if (ioa_cfg->hrrq_num == 1)
8453 atomic_set(&ioa_cfg->hrrq_index, 0);
8454 else
8455 atomic_set(&ioa_cfg->hrrq_index, 1);
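
	/* Zero out config table */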
8458 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8459}
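
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/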
8468static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8469{
8470 unsigned long stage, stage_time;
8471 u32 feedback;
8472 volatile u32 int_reg;
8473 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8474 u64 maskval = 0;
8475
8476 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8477 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8478 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8479
8480 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
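
	/* sanity check the stage_time value */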
8483 if (stage_time == 0)
8484 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8485 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8486 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8487 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8488 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8489
8490 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8491 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8492 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8493 stage_time = ioa_cfg->transop_timeout;
8494 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8495 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8496 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8497 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8498 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8499 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8500 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8501 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8502 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8503 return IPR_RC_JOB_CONTINUE;
8504 }
8505 }
8506
8507 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8508 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8509 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8510 ipr_cmd->done = ipr_reset_ioa_job;
8511 add_timer(&ipr_cmd->timer);
8512
8513 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8514
8515 return IPR_RC_JOB_RETURN;
8516}
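
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/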
8528static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8529{
8530 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8531 volatile u32 int_reg;
8532 volatile u64 maskval;
8533 int i;
8534
8535 ENTER;
8536 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8537 ipr_init_ioa_mem(ioa_cfg);
8538
8539 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8540 spin_lock(&ioa_cfg->hrrq[i]._lock);
8541 ioa_cfg->hrrq[i].allow_interrupts = 1;
8542 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8543 }
8544 wmb();
8545 if (ioa_cfg->sis64) {
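		/* Set the adapter to the correct endian mode. */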
8547 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8548 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8549 }
8550
8551 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8552
8553 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8554 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8555 ioa_cfg->regs.clr_interrupt_mask_reg32);
8556 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8557 return IPR_RC_JOB_CONTINUE;
8558 }
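
	/* Enable destructive diagnostics on IOA */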
8561 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8562
8563 if (ioa_cfg->sis64) {
8564 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8565 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8566 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8567 } else
8568 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8569
8570 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8571
8572 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8573
8574 if (ioa_cfg->sis64) {
8575 ipr_cmd->job_step = ipr_reset_next_stage;
8576 return IPR_RC_JOB_CONTINUE;
8577 }
8578
8579 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8580 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8581 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8582 ipr_cmd->done = ipr_reset_ioa_job;
8583 add_timer(&ipr_cmd->timer);
8584 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8585
8586 LEAVE;
8587 return IPR_RC_JOB_RETURN;
8588}
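
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/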
8600static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8601{
8602 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8603
8604 if (ioa_cfg->sdt_state == GET_DUMP)
8605 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8606 else if (ioa_cfg->sdt_state == READ_DUMP)
8607 ioa_cfg->sdt_state = ABORT_DUMP;
8608
8609 ioa_cfg->dump_timeout = 1;
8610 ipr_cmd->job_step = ipr_reset_alert;
8611
8612 return IPR_RC_JOB_CONTINUE;
8613}
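
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/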
8625static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8626{
8627 ioa_cfg->errors_logged++;
8628 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8629}
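
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/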
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	     (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del_init(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

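/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/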
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

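/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:	ipr command struct
 *
 * Polls until the SIS-64 mailbox is stable (or the wait times out),
 * then starts the dump read.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/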
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);

	} else {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

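/**
 * ipr_reset_restore_cfg_space - Restore PCI config space
 * @ipr_cmd:	ipr command struct
 *
 * Restores the saved PCI config space, fails all outstanding ops,
 * and selects the next job step based on the adapter state.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/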
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

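/**
 * ipr_reset_bist_done - BIST has completed on the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Unblocks config space access and moves on to restoring config space.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/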
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

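/**
 * ipr_reset_start_bist - Run BIST on the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Starts BIST either through MMIO (SIS-64) or a PCI config write.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/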
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}

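/**
 * ipr_reset_slot_reset_done - Clean up after a PCI slot reset
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/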
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

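/**
 * ipr_reset_reset_work - Pulse a PCIe warm reset
 * @work:	work struct
 *
 * Asserts and deasserts a PCIe warm reset, then resumes the reset
 * job if it is still the current one.
 *
 * Return value:
 *	nothing
 **/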
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

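/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Queues the warm reset work on the reset workqueue.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/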
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	LEAVE;
	return IPR_RC_JOB_RETURN;
}

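/**
 * ipr_reset_block_config_access_wait - Wait for exclusive config access
 * @ipr_cmd:	ipr command struct
 *
 * Retries locking config space access until it succeeds or the wait
 * expires, in which case the reset proceeds anyway.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/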
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}

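/**
 * ipr_reset_block_config_access - Block config space access
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/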
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}

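/**
 * ipr_reset_allowed - Query whether a reset is currently allowed
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	non-zero if a reset is allowed / 0 if not
 **/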
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

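/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset
 * @ipr_cmd:	ipr command struct
 *
 * Polls until the adapter permits a reset or the wait expires.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/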
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

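/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Sets the reset alert bit if memory space is enabled, then waits
 * for permission to start BIST.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/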
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

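/**
 * ipr_reset_quiesce_done - Complete the quiesce of the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/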
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

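/**
 * ipr_reset_cancel_hcam_done - Check for pending ops before the quiesce
 * @ipr_cmd:	ipr command struct
 *
 * If any ops are still pending, a full adapter reset is initiated
 * instead of completing the quiesce.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/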
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ENTER;
	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	LEAVE;
	return rc;
}

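/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs prior to a quiesce
 * @ipr_cmd:	ipr command struct
 *
 * Issues a cancel request for each outstanding host controlled async
 * command, one at a time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/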
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ENTER;
	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

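/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/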
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

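/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Builds and sends a WRITE_BUFFER command if a microcode buffer has
 * been staged; otherwise falls through to the reset alert.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/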
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

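/**
 * ipr_reset_shutdown_ioa - Shut down the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Issues the appropriate shutdown command, or skips straight to the
 * cancel/alert steps depending on the shutdown type and IOA state.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/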
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
		 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

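/**
 * ipr_reset_ioa_job - Run the adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Drives the reset state machine, invoking job steps until one of
 * them needs to wait for an interrupt or timer.
 *
 * Return value:
 *	nothing
 **/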
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
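			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */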
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

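/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of the reset job
 * @shutdown_type:	shutdown type
 *
 * Blocks new host commands, grabs a command block, and starts the
 * reset job.
 *
 * Return value:
 *	nothing
 **/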
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 1;
		scsi_block_requests(ioa_cfg->host);
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

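/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Starts a reset, aborting any dump in progress. If too many resets
 * have been attempted, the adapter is taken offline instead.
 *
 * Return value:
 *	nothing
 **/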
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				ioa_cfg->scsi_unblock = 1;
				schedule_work(&ioa_cfg->work_q);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

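/**
 * ipr_reset_freeze - Freeze the adapter for PCI error recovery
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/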
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

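	/* Disallow new interrupts while the channel is frozen */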
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

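/**
 * ipr_pci_mmio_enabled - MMIO enabled callback for PCI error recovery
 * @pdev:	PCI device struct
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET
 **/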
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}

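/**
 * ipr_pci_frozen - Called when a PCI slot has experienced an error
 * @pdev:	PCI device struct
 *
 * Freezes the adapter so the reset job can take over once the slot
 * has been reset.
 *
 * Return value:
 *	nothing
 **/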
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

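/**
 * ipr_pci_slot_reset - Called when the PCI slot has been reset
 * @pdev:	PCI device struct
 *
 * Restarts the adapter following a slot reset.
 *
 * Return value:
 *	PCI_ERS_RESULT_RECOVERED
 **/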
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

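/**
 * ipr_pci_perm_failure - Called when PCI error recovery is giving up
 * @pdev:	PCI device struct
 *
 * Takes the adapter offline.
 *
 * Return value:
 *	nothing
 **/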
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

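/**
 * ipr_pci_error_detected - Called when a PCI error is detected
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Return value:
 *	PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_DISCONNECT /
 *	PCI_ERS_RESULT_NEED_RESET
 **/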
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

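/**
 * ipr_probe_ioa_part2 - Second phase of adapter initialization
 * @ioa_cfg:	ioa config struct
 *
 * Kicks off the initial adapter reset that brings the IOA to an
 * operational state.
 *
 * Return value:
 *	0 on success
 **/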
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

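/**
 * ipr_free_cmd_blks - Free command blocks allocated for the adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/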
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

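/**
 * ipr_free_mem - Free memory allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/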
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

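/**
 * ipr_free_irqs - Free adapter interrupts
 * @ioa_cfg:	ioa config struct
 *
 * Releases the registered IRQ handlers and disables MSI/MSI-X.
 *
 * Return value:
 *	nothing
 **/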
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}
}

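/**
 * ipr_free_all_resources - Free all allocated adapter resources
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/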
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

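/**
 * ipr_alloc_cmd_blks - Allocate command blocks for the adapter
 * @ioa_cfg:	ioa config struct
 *
 * Creates the command DMA pool and distributes the command blocks
 * across the host RRQs.
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/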
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					 i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}

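/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/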
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i >= 0)
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

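/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to defaults
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/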
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

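/**
 * ipr_init_regs - Initialize the mapped register addresses
 * @ioa_cfg:	ioa config struct
 *
 * Computes the ioremapped address of each adapter register from the
 * per-chip offset table.
 *
 * Return value:
 *	nothing
 **/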
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

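/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	nothing
 **/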
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}

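/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/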
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

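/**
 * ipr_wait_for_pci_err_recovery - Wait for PCI error recovery to finish
 * @ioa_cfg:	ioa config struct
 *
 * If the PCI channel is offline, waits (with a timeout) for error
 * recovery to bring it back, then restores config space.
 *
 * Return value:
 *	nothing
 **/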
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

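/* Allocate MSI-X vectors and record the resulting vector numbers */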
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(ioa_cfg->pdev,
					entries, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = entries[i].vector;
	ioa_cfg->nvectors = vectors;

	return 0;
}

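/* Allocate MSI vectors and record the resulting IRQ numbers */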
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, vectors;

	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
	ioa_cfg->nvectors = vectors;

	return 0;
}

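/* Build a "host<n>-<vector>" description for each enabled vector */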
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

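/* Request IRQs for all vectors beyond the first, unwinding on failure */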
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}

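/**
 * ipr_test_intr - Handle the interrupt generated by the MSI test
 * @irq:	interrupt number
 * @devp:	ioa config struct
 *
 * Return value:
 *	IRQ_HANDLED
 **/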
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

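/**
 * ipr_test_msi - Test for MSI (Message Signaled Interrupt) support
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Generates a test interrupt and waits for it to arrive, falling
 * back to LSI if it never does.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/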
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	else
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
	else
		free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

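/**
 * ipr_probe_ioa - Allocate and initialize an adapter
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Maps the adapter, sets up interrupts, and allocates all memory
 * the driver needs for this adapter.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/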
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

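	/* Issue an MMIO read so a frozen PCI channel is detected early */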
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
	    ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
		 ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->clear_isr = 1;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		}
		else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					 "Request for %d MSIs succeeded with starting IRQ: %d\n",
					 ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					 "Request for %d MSIXs succeeded.\n",
					 ioa_cfg->nvectors);
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

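	/* Save away PCI config space for use following an adapter reset */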
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

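	/*
	 * If the HRRQ updated interrupt is masked or a reset alert is
	 * pending, the adapter is in an unknown state and needs a hard
	 * reset before it can be used.
	 */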
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSI
	    || ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
				 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

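/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Aborts any dump in progress and starts the bringdown reset.
 *
 * Return value:
 *	nothing
 **/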
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

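/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Waits for any reset in progress, brings the adapter down, and
 * frees all of its resources.
 *
 * Return value:
 *	nothing
 **/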
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

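/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Removes the sysfs entries for the adapter and then removes the
 * adapter itself.
 *
 * Return value:
 *	nothing
 **/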
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

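/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/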
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);
	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}

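/**
 * ipr_shutdown - Shutdown handler
 * @pdev:	pci device struct
 *
 * Issues an adapter shutdown on system shutdown or reboot; with fast
 * reboot enabled on SIS-64 adapters, a quiesce is used instead.
 *
 * Return value:
 *	nothing
 **/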
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

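/* PCI error recovery (EEH/AER) callbacks */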
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

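/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/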
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

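/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:	Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/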
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

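	/*
	 * Send a shutdown prepare to every adapter that is still accepting
	 * commands; skip adapters a SIS64 fast reboot already quiesced.
	 */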
	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

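/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/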
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

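/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/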
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);