#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

#define HPSA_DRIVER_VERSION "3.4.20-200"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

#define CLEAR_EVENT_WAIT_INTERVAL 20
#define MODE_CHANGE_WAIT_INTERVAL 10
#define MAX_CLEAR_EVENT_WAIT 30000
#define MAX_MODE_CHANGE_WAIT 2000
#define MAX_IOCTL_CONFIG_WAIT 1000

#define MAX_CMD_RETRIES 3

#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
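
/*
 * PCI IDs claimed by this driver: specific Smart Array boards by subsystem
 * vendor/device ID, plus catch-all entries for any other HP/Compaq device
 * with a RAID storage class code.
 */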
94static const struct pci_device_id hpsa_pci_device_id[] = {
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
135 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
136 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
141 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
142 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
145 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
146 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
147 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
148 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
149 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
150 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
151 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
152 {0,}
153};
154
155MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
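
/*
 * Board ID (PCI subsystem device ID in the high 16 bits, subsystem vendor
 * ID in the low 16 bits) to product name and register access method.  The
 * final 0xFFFF103C entry acts as a catch-all for unrecognized boards.
 */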
161static struct board_type products[] = {
162 {0x40700E11, "Smart Array 5300", &SA5A_access},
163 {0x40800E11, "Smart Array 5i", &SA5B_access},
164 {0x40820E11, "Smart Array 532", &SA5B_access},
165 {0x40830E11, "Smart Array 5312", &SA5B_access},
166 {0x409A0E11, "Smart Array 641", &SA5A_access},
167 {0x409B0E11, "Smart Array 642", &SA5A_access},
168 {0x409C0E11, "Smart Array 6400", &SA5A_access},
169 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
170 {0x40910E11, "Smart Array 6i", &SA5A_access},
171 {0x3225103C, "Smart Array P600", &SA5A_access},
172 {0x3223103C, "Smart Array P800", &SA5A_access},
173 {0x3234103C, "Smart Array P400", &SA5A_access},
174 {0x3235103C, "Smart Array P400i", &SA5A_access},
175 {0x3211103C, "Smart Array E200i", &SA5A_access},
176 {0x3212103C, "Smart Array E200", &SA5A_access},
177 {0x3213103C, "Smart Array E200i", &SA5A_access},
178 {0x3214103C, "Smart Array E200i", &SA5A_access},
179 {0x3215103C, "Smart Array E200i", &SA5A_access},
180 {0x3237103C, "Smart Array E500", &SA5A_access},
181 {0x323D103C, "Smart Array P700m", &SA5A_access},
182 {0x3241103C, "Smart Array P212", &SA5_access},
183 {0x3243103C, "Smart Array P410", &SA5_access},
184 {0x3245103C, "Smart Array P410i", &SA5_access},
185 {0x3247103C, "Smart Array P411", &SA5_access},
186 {0x3249103C, "Smart Array P812", &SA5_access},
187 {0x324A103C, "Smart Array P712m", &SA5_access},
188 {0x324B103C, "Smart Array P711m", &SA5_access},
189 {0x3233103C, "HP StorageWorks 1210m", &SA5_access},
190 {0x3350103C, "Smart Array P222", &SA5_access},
191 {0x3351103C, "Smart Array P420", &SA5_access},
192 {0x3352103C, "Smart Array P421", &SA5_access},
193 {0x3353103C, "Smart Array P822", &SA5_access},
194 {0x3354103C, "Smart Array P420i", &SA5_access},
195 {0x3355103C, "Smart Array P220i", &SA5_access},
196 {0x3356103C, "Smart Array P721m", &SA5_access},
197 {0x1920103C, "Smart Array P430i", &SA5_access},
198 {0x1921103C, "Smart Array P830i", &SA5_access},
199 {0x1922103C, "Smart Array P430", &SA5_access},
200 {0x1923103C, "Smart Array P431", &SA5_access},
201 {0x1924103C, "Smart Array P830", &SA5_access},
202 {0x1925103C, "Smart Array P831", &SA5_access},
203 {0x1926103C, "Smart Array P731m", &SA5_access},
204 {0x1928103C, "Smart Array P230i", &SA5_access},
205 {0x1929103C, "Smart Array P530", &SA5_access},
206 {0x21BD103C, "Smart Array P244br", &SA5_access},
207 {0x21BE103C, "Smart Array P741m", &SA5_access},
208 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
209 {0x21C0103C, "Smart Array P440ar", &SA5_access},
210 {0x21C1103C, "Smart Array P840ar", &SA5_access},
211 {0x21C2103C, "Smart Array P440", &SA5_access},
212 {0x21C3103C, "Smart Array P441", &SA5_access},
213 {0x21C4103C, "Smart Array", &SA5_access},
214 {0x21C5103C, "Smart Array P841", &SA5_access},
215 {0x21C6103C, "Smart HBA H244br", &SA5_access},
216 {0x21C7103C, "Smart HBA H240", &SA5_access},
217 {0x21C8103C, "Smart HBA H241", &SA5_access},
218 {0x21C9103C, "Smart Array", &SA5_access},
219 {0x21CA103C, "Smart Array P246br", &SA5_access},
220 {0x21CB103C, "Smart Array P840", &SA5_access},
221 {0x21CC103C, "Smart Array", &SA5_access},
222 {0x21CD103C, "Smart Array", &SA5_access},
223 {0x21CE103C, "Smart HBA", &SA5_access},
224 {0x05809005, "SmartHBA-SA", &SA5_access},
225 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
226 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
227 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
228 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
229 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
230 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
231 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
232 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
233 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
234 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
235 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
236};
237
238static struct scsi_transport_template *hpsa_sas_transport_template;
239static int hpsa_add_sas_host(struct ctlr_info *h);
240static void hpsa_delete_sas_host(struct ctlr_info *h);
241static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
242 struct hpsa_scsi_dev_t *device);
243static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
244static struct hpsa_scsi_dev_t
245 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
246 struct sas_rphy *rphy);
247
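/*
 * Sentinel scsi_cmnd pointers: a CommandList whose scsi_cmd field points at
 * hpsa_cmd_idle is free, while one pointing at hpsa_cmd_busy is in use for
 * a driver-internal command with no midlayer scsi_cmnd attached.
 */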
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_idle;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
252static int number_of_controllers;
253
254static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
255static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
256static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
257 void __user *arg);
258static int hpsa_passthru_ioctl(struct ctlr_info *h,
259 IOCTL_Command_struct *iocommand);
260static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
261 BIG_IOCTL_Command_struct *ioc);
262
263#ifdef CONFIG_COMPAT
264static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
265 void __user *arg);
266#endif
267
268static void cmd_free(struct ctlr_info *h, struct CommandList *c);
269static struct CommandList *cmd_alloc(struct ctlr_info *h);
270static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
271static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
272 struct scsi_cmnd *scmd);
273static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
274 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
275 int cmd_type);
276static void hpsa_free_cmd_pool(struct ctlr_info *h);
277#define VPD_PAGE (1 << 8)
278#define HPSA_SIMPLE_ERROR_BITS 0x03
279
280static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
281static void hpsa_scan_start(struct Scsi_Host *);
282static int hpsa_scan_finished(struct Scsi_Host *sh,
283 unsigned long elapsed_time);
284static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
285
286static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
287static int hpsa_slave_alloc(struct scsi_device *sdev);
288static int hpsa_slave_configure(struct scsi_device *sdev);
289static void hpsa_slave_destroy(struct scsi_device *sdev);
290
291static void hpsa_update_scsi_devices(struct ctlr_info *h);
292static int check_for_unit_attention(struct ctlr_info *h,
293 struct CommandList *c);
294static void check_ioctl_unit_attention(struct ctlr_info *h,
295 struct CommandList *c);
296
297static void calc_bucket_map(int *bucket, int num_buckets,
298 int nsgs, int min_blocks, u32 *bucket_map);
299static void hpsa_free_performant_mode(struct ctlr_info *h);
300static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
301static inline u32 next_command(struct ctlr_info *h, u8 q);
302static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
303 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
304 u64 *cfg_offset);
305static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
306 unsigned long *memory_bar);
307static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
308 bool *legacy_board);
309static int wait_for_device_to_become_ready(struct ctlr_info *h,
310 unsigned char lunaddr[],
311 int reply_queue);
312static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
313 int wait_for_ready);
314static inline void finish_cmd(struct CommandList *c);
315static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
316#define BOARD_NOT_READY 0
317#define BOARD_READY 1
318static void hpsa_drain_accel_commands(struct ctlr_info *h);
319static void hpsa_flush_cache(struct ctlr_info *h);
320static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
321 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
322 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
323static void hpsa_command_resubmit_worker(struct work_struct *work);
324static u32 lockup_detected(struct ctlr_info *h);
325static int detect_controller_lockup(struct ctlr_info *h);
326static void hpsa_disable_rld_caching(struct ctlr_info *h);
327static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
328 struct ReportExtendedLUNdata *buf, int bufsize);
329static bool hpsa_vpd_page_supported(struct ctlr_info *h,
330 unsigned char scsi3addr[], u8 page);
331static int hpsa_luns_changed(struct ctlr_info *h);
332static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
333 struct hpsa_scsi_dev_t *dev,
334 unsigned char *scsi3addr);
335
336static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
337{
338 unsigned long *priv = shost_priv(sdev->host);
339 return (struct ctlr_info *) *priv;
340}
341
342static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
343{
344 unsigned long *priv = shost_priv(sh);
345 return (struct ctlr_info *) *priv;
346}
347
348static inline bool hpsa_is_cmd_idle(struct CommandList *c)
349{
350 return c->scsi_cmd == SCSI_CMD_IDLE;
351}
352
353
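/*
 * Pull the sense key, ASC and ASCQ out of fixed- or descriptor-format sense
 * data; all three outputs are set to -1 (0xff) if the buffer can't be parsed.
 */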
354static void decode_sense_data(const u8 *sense_data, int sense_data_len,
355 u8 *sense_key, u8 *asc, u8 *ascq)
356{
357 struct scsi_sense_hdr sshdr;
358 bool rc;
359
360 *sense_key = -1;
361 *asc = -1;
362 *ascq = -1;
363
364 if (sense_data_len < 1)
365 return;
366
367 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
368 if (rc) {
369 *sense_key = sshdr.sense_key;
370 *asc = sshdr.asc;
371 *ascq = sshdr.ascq;
372 }
373}
374
375static int check_for_unit_attention(struct ctlr_info *h,
376 struct CommandList *c)
377{
378 u8 sense_key, asc, ascq;
379 int sense_len;
380
381 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
382 sense_len = sizeof(c->err_info->SenseInfo);
383 else
384 sense_len = c->err_info->SenseLen;
385
386 decode_sense_data(c->err_info->SenseInfo, sense_len,
387 &sense_key, &asc, &ascq);
388 if (sense_key != UNIT_ATTENTION || asc == 0xff)
389 return 0;
390
391 switch (asc) {
392 case STATE_CHANGED:
393 dev_warn(&h->pdev->dev,
394 "%s: a state change detected, command retried\n",
395 h->devname);
396 break;
397 case LUN_FAILED:
398 dev_warn(&h->pdev->dev,
399 "%s: LUN failure detected\n", h->devname);
400 break;
401 case REPORT_LUNS_CHANGED:
402 dev_warn(&h->pdev->dev,
403 "%s: report LUN data changed\n", h->devname);
408 break;
409 case POWER_OR_RESET:
410 dev_warn(&h->pdev->dev,
411 "%s: a power on or device reset detected\n",
412 h->devname);
413 break;
414 case UNIT_ATTENTION_CLEARED:
415 dev_warn(&h->pdev->dev,
416 "%s: unit attention cleared by another initiator\n",
417 h->devname);
418 break;
419 default:
420 dev_warn(&h->pdev->dev,
421 "%s: unknown unit attention detected\n",
422 h->devname);
423 break;
424 }
425 return 1;
426}
427
428static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
429{
430 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
431 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
432 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
433 return 0;
	dev_warn(&h->pdev->dev, HPSA " device busy\n");
435 return 1;
436}
437
438static u32 lockup_detected(struct ctlr_info *h);
439static ssize_t host_show_lockup_detected(struct device *dev,
440 struct device_attribute *attr, char *buf)
441{
442 int ld;
443 struct ctlr_info *h;
444 struct Scsi_Host *shost = class_to_shost(dev);
445
446 h = shost_to_hba(shost);
447 ld = lockup_detected(h);
448
449 return sprintf(buf, "ld=%d\n", ld);
450}
451
452static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
453 struct device_attribute *attr,
454 const char *buf, size_t count)
455{
456 int status, len;
457 struct ctlr_info *h;
458 struct Scsi_Host *shost = class_to_shost(dev);
459 char tmpbuf[10];
460
461 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
462 return -EACCES;
463 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
464 strncpy(tmpbuf, buf, len);
465 tmpbuf[len] = '\0';
466 if (sscanf(tmpbuf, "%d", &status) != 1)
467 return -EINVAL;
468 h = shost_to_hba(shost);
469 h->acciopath_status = !!status;
470 dev_warn(&h->pdev->dev,
471 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
472 h->acciopath_status ? "enabled" : "disabled");
473 return count;
474}
475
476static ssize_t host_store_raid_offload_debug(struct device *dev,
477 struct device_attribute *attr,
478 const char *buf, size_t count)
479{
480 int debug_level, len;
481 struct ctlr_info *h;
482 struct Scsi_Host *shost = class_to_shost(dev);
483 char tmpbuf[10];
484
485 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
486 return -EACCES;
487 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
488 strncpy(tmpbuf, buf, len);
489 tmpbuf[len] = '\0';
490 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
491 return -EINVAL;
492 if (debug_level < 0)
493 debug_level = 0;
494 h = shost_to_hba(shost);
495 h->raid_offload_debug = debug_level;
496 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
497 h->raid_offload_debug);
498 return count;
499}
500
501static ssize_t host_store_rescan(struct device *dev,
502 struct device_attribute *attr,
503 const char *buf, size_t count)
504{
505 struct ctlr_info *h;
506 struct Scsi_Host *shost = class_to_shost(dev);
507 h = shost_to_hba(shost);
508 hpsa_scan_start(h->scsi_host);
509 return count;
510}
511
512static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
513{
514 device->offload_enabled = 0;
515 device->offload_to_be_enabled = 0;
516}
517
518static ssize_t host_show_firmware_revision(struct device *dev,
519 struct device_attribute *attr, char *buf)
520{
521 struct ctlr_info *h;
522 struct Scsi_Host *shost = class_to_shost(dev);
523 unsigned char *fwrev;
524
525 h = shost_to_hba(shost);
526 if (!h->hba_inquiry_data)
527 return 0;
528 fwrev = &h->hba_inquiry_data[32];
529 return snprintf(buf, 20, "%c%c%c%c\n",
530 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
531}
532
533static ssize_t host_show_commands_outstanding(struct device *dev,
534 struct device_attribute *attr, char *buf)
535{
536 struct Scsi_Host *shost = class_to_shost(dev);
537 struct ctlr_info *h = shost_to_hba(shost);
538
539 return snprintf(buf, 20, "%d\n",
540 atomic_read(&h->commands_outstanding));
541}
542
543static ssize_t host_show_transport_mode(struct device *dev,
544 struct device_attribute *attr, char *buf)
545{
546 struct ctlr_info *h;
547 struct Scsi_Host *shost = class_to_shost(dev);
548
549 h = shost_to_hba(shost);
550 return snprintf(buf, 20, "%s\n",
551 h->transMethod & CFGTBL_Trans_Performant ?
552 "performant" : "simple");
553}
554
555static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
556 struct device_attribute *attr, char *buf)
557{
558 struct ctlr_info *h;
559 struct Scsi_Host *shost = class_to_shost(dev);
560
561 h = shost_to_hba(shost);
562 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
563 (h->acciopath_status == 1) ? "enabled" : "disabled");
564}
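
/* Board IDs for which a hard controller reset is not supported. */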
567static u32 unresettable_controller[] = {
568 0x324a103C,
569 0x324b103C,
570 0x3223103C,
571 0x3234103C,
572 0x3235103C,
573 0x3211103C,
574 0x3212103C,
575 0x3213103C,
576 0x3214103C,
577 0x3215103C,
578 0x3237103C,
579 0x323D103C,
580 0x40800E11,
581 0x409C0E11,
582 0x409D0E11,
583 0x40700E11,
584 0x40820E11,
585 0x40830E11,
586 0x409A0E11,
587 0x409B0E11,
588 0x40910E11,
589};
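
/* Board IDs for which even a soft controller reset is not supported. */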
592static u32 soft_unresettable_controller[] = {
593 0x40800E11,
594 0x40700E11,
595 0x40820E11,
596 0x40830E11,
597 0x409A0E11,
598 0x409B0E11,
599 0x40910E11,
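	/*
	 * The 409C/409D boards (Smart Array 6400 / 6400 EM) are likewise
	 * excluded from soft reset.
	 */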
607 0x409C0E11,
608 0x409D0E11,
609};
610
611static int board_id_in_array(u32 a[], int nelems, u32 board_id)
612{
613 int i;
614
615 for (i = 0; i < nelems; i++)
616 if (a[i] == board_id)
617 return 1;
618 return 0;
619}
620
621static int ctlr_is_hard_resettable(u32 board_id)
622{
623 return !board_id_in_array(unresettable_controller,
624 ARRAY_SIZE(unresettable_controller), board_id);
625}
626
627static int ctlr_is_soft_resettable(u32 board_id)
628{
629 return !board_id_in_array(soft_unresettable_controller,
630 ARRAY_SIZE(soft_unresettable_controller), board_id);
631}
632
633static int ctlr_is_resettable(u32 board_id)
634{
635 return ctlr_is_hard_resettable(board_id) ||
636 ctlr_is_soft_resettable(board_id);
637}
638
639static ssize_t host_show_resettable(struct device *dev,
640 struct device_attribute *attr, char *buf)
641{
642 struct ctlr_info *h;
643 struct Scsi_Host *shost = class_to_shost(dev);
644
645 h = shost_to_hba(shost);
646 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
647}
648
649static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
650{
651 return (scsi3addr[3] & 0xC0) == 0x40;
652}
653
654static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
655 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
656};
657#define HPSA_RAID_0 0
658#define HPSA_RAID_4 1
659#define HPSA_RAID_1 2
660#define HPSA_RAID_5 3
661#define HPSA_RAID_51 4
662#define HPSA_RAID_6 5
663#define HPSA_RAID_ADM 6
664#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
665#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
666
667static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
668{
669 return !device->physical_device;
670}
671
672static ssize_t raid_level_show(struct device *dev,
673 struct device_attribute *attr, char *buf)
674{
675 ssize_t l = 0;
676 unsigned char rlevel;
677 struct ctlr_info *h;
678 struct scsi_device *sdev;
679 struct hpsa_scsi_dev_t *hdev;
680 unsigned long flags;
681
682 sdev = to_scsi_device(dev);
683 h = sdev_to_hba(sdev);
684 spin_lock_irqsave(&h->lock, flags);
685 hdev = sdev->hostdata;
686 if (!hdev) {
687 spin_unlock_irqrestore(&h->lock, flags);
688 return -ENODEV;
689 }
690
691
692 if (!is_logical_device(hdev)) {
693 spin_unlock_irqrestore(&h->lock, flags);
694 l = snprintf(buf, PAGE_SIZE, "N/A\n");
695 return l;
696 }
697
698 rlevel = hdev->raid_level;
699 spin_unlock_irqrestore(&h->lock, flags);
700 if (rlevel > RAID_UNKNOWN)
701 rlevel = RAID_UNKNOWN;
702 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
703 return l;
704}
705
706static ssize_t lunid_show(struct device *dev,
707 struct device_attribute *attr, char *buf)
708{
709 struct ctlr_info *h;
710 struct scsi_device *sdev;
711 struct hpsa_scsi_dev_t *hdev;
712 unsigned long flags;
713 unsigned char lunid[8];
714
715 sdev = to_scsi_device(dev);
716 h = sdev_to_hba(sdev);
717 spin_lock_irqsave(&h->lock, flags);
718 hdev = sdev->hostdata;
719 if (!hdev) {
720 spin_unlock_irqrestore(&h->lock, flags);
721 return -ENODEV;
722 }
723 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
724 spin_unlock_irqrestore(&h->lock, flags);
725 return snprintf(buf, 20, "0x%8phN\n", lunid);
726}
727
728static ssize_t unique_id_show(struct device *dev,
729 struct device_attribute *attr, char *buf)
730{
731 struct ctlr_info *h;
732 struct scsi_device *sdev;
733 struct hpsa_scsi_dev_t *hdev;
734 unsigned long flags;
735 unsigned char sn[16];
736
737 sdev = to_scsi_device(dev);
738 h = sdev_to_hba(sdev);
739 spin_lock_irqsave(&h->lock, flags);
740 hdev = sdev->hostdata;
741 if (!hdev) {
742 spin_unlock_irqrestore(&h->lock, flags);
743 return -ENODEV;
744 }
745 memcpy(sn, hdev->device_id, sizeof(sn));
746 spin_unlock_irqrestore(&h->lock, flags);
747 return snprintf(buf, 16 * 2 + 2,
748 "%02X%02X%02X%02X%02X%02X%02X%02X"
749 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
750 sn[0], sn[1], sn[2], sn[3],
751 sn[4], sn[5], sn[6], sn[7],
752 sn[8], sn[9], sn[10], sn[11],
753 sn[12], sn[13], sn[14], sn[15]);
754}
755
756static ssize_t sas_address_show(struct device *dev,
757 struct device_attribute *attr, char *buf)
758{
759 struct ctlr_info *h;
760 struct scsi_device *sdev;
761 struct hpsa_scsi_dev_t *hdev;
762 unsigned long flags;
763 u64 sas_address;
764
765 sdev = to_scsi_device(dev);
766 h = sdev_to_hba(sdev);
767 spin_lock_irqsave(&h->lock, flags);
768 hdev = sdev->hostdata;
769 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
770 spin_unlock_irqrestore(&h->lock, flags);
771 return -ENODEV;
772 }
773 sas_address = hdev->sas_address;
774 spin_unlock_irqrestore(&h->lock, flags);
775
776 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
777}
778
779static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
780 struct device_attribute *attr, char *buf)
781{
782 struct ctlr_info *h;
783 struct scsi_device *sdev;
784 struct hpsa_scsi_dev_t *hdev;
785 unsigned long flags;
786 int offload_enabled;
787
788 sdev = to_scsi_device(dev);
789 h = sdev_to_hba(sdev);
790 spin_lock_irqsave(&h->lock, flags);
791 hdev = sdev->hostdata;
792 if (!hdev) {
793 spin_unlock_irqrestore(&h->lock, flags);
794 return -ENODEV;
795 }
796 offload_enabled = hdev->offload_enabled;
797 spin_unlock_irqrestore(&h->lock, flags);
798
799 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
800 return snprintf(buf, 20, "%d\n", offload_enabled);
801 else
802 return snprintf(buf, 40, "%s\n",
803 "Not applicable for a controller");
804}
805
806#define MAX_PATHS 8
807static ssize_t path_info_show(struct device *dev,
808 struct device_attribute *attr, char *buf)
809{
810 struct ctlr_info *h;
811 struct scsi_device *sdev;
812 struct hpsa_scsi_dev_t *hdev;
813 unsigned long flags;
814 int i;
815 int output_len = 0;
816 u8 box;
817 u8 bay;
818 u8 path_map_index = 0;
819 char *active;
820 unsigned char phys_connector[2];
821
822 sdev = to_scsi_device(dev);
823 h = sdev_to_hba(sdev);
824 spin_lock_irqsave(&h->devlock, flags);
825 hdev = sdev->hostdata;
826 if (!hdev) {
827 spin_unlock_irqrestore(&h->devlock, flags);
828 return -ENODEV;
829 }
830
831 bay = hdev->bay;
832 for (i = 0; i < MAX_PATHS; i++) {
833 path_map_index = 1<<i;
834 if (i == hdev->active_path_index)
835 active = "Active";
836 else if (hdev->path_map & path_map_index)
837 active = "Inactive";
838 else
839 continue;
840
841 output_len += scnprintf(buf + output_len,
842 PAGE_SIZE - output_len,
843 "[%d:%d:%d:%d] %20.20s ",
844 h->scsi_host->host_no,
845 hdev->bus, hdev->target, hdev->lun,
846 scsi_device_type(hdev->devtype));
847
848 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
849 output_len += scnprintf(buf + output_len,
850 PAGE_SIZE - output_len,
851 "%s\n", active);
852 continue;
853 }
854
855 box = hdev->box[i];
856 memcpy(&phys_connector, &hdev->phys_connector[i],
857 sizeof(phys_connector));
858 if (phys_connector[0] < '0')
859 phys_connector[0] = '0';
860 if (phys_connector[1] < '0')
861 phys_connector[1] = '0';
862 output_len += scnprintf(buf + output_len,
863 PAGE_SIZE - output_len,
864 "PORT: %.2s ",
865 phys_connector);
866 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
867 hdev->expose_device) {
868 if (box == 0 || box == 0xFF) {
869 output_len += scnprintf(buf + output_len,
870 PAGE_SIZE - output_len,
871 "BAY: %hhu %s\n",
872 bay, active);
873 } else {
874 output_len += scnprintf(buf + output_len,
875 PAGE_SIZE - output_len,
876 "BOX: %hhu BAY: %hhu %s\n",
877 box, bay, active);
878 }
879 } else if (box != 0 && box != 0xFF) {
880 output_len += scnprintf(buf + output_len,
881 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
882 box, active);
883 } else
884 output_len += scnprintf(buf + output_len,
885 PAGE_SIZE - output_len, "%s\n", active);
886 }
887
888 spin_unlock_irqrestore(&h->devlock, flags);
889 return output_len;
890}
891
892static ssize_t host_show_ctlr_num(struct device *dev,
893 struct device_attribute *attr, char *buf)
894{
895 struct ctlr_info *h;
896 struct Scsi_Host *shost = class_to_shost(dev);
897
898 h = shost_to_hba(shost);
899 return snprintf(buf, 20, "%d\n", h->ctlr);
900}
901
902static ssize_t host_show_legacy_board(struct device *dev,
903 struct device_attribute *attr, char *buf)
904{
905 struct ctlr_info *h;
906 struct Scsi_Host *shost = class_to_shost(dev);
907
908 h = shost_to_hba(shost);
909 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
910}
911
912static DEVICE_ATTR_RO(raid_level);
913static DEVICE_ATTR_RO(lunid);
914static DEVICE_ATTR_RO(unique_id);
915static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
916static DEVICE_ATTR_RO(sas_address);
917static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
918 host_show_hp_ssd_smart_path_enabled, NULL);
919static DEVICE_ATTR_RO(path_info);
920static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
921 host_show_hp_ssd_smart_path_status,
922 host_store_hp_ssd_smart_path_status);
923static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
924 host_store_raid_offload_debug);
925static DEVICE_ATTR(firmware_revision, S_IRUGO,
926 host_show_firmware_revision, NULL);
927static DEVICE_ATTR(commands_outstanding, S_IRUGO,
928 host_show_commands_outstanding, NULL);
929static DEVICE_ATTR(transport_mode, S_IRUGO,
930 host_show_transport_mode, NULL);
931static DEVICE_ATTR(resettable, S_IRUGO,
932 host_show_resettable, NULL);
933static DEVICE_ATTR(lockup_detected, S_IRUGO,
934 host_show_lockup_detected, NULL);
935static DEVICE_ATTR(ctlr_num, S_IRUGO,
936 host_show_ctlr_num, NULL);
937static DEVICE_ATTR(legacy_board, S_IRUGO,
938 host_show_legacy_board, NULL);
939
940static struct device_attribute *hpsa_sdev_attrs[] = {
941 &dev_attr_raid_level,
942 &dev_attr_lunid,
943 &dev_attr_unique_id,
944 &dev_attr_hp_ssd_smart_path_enabled,
945 &dev_attr_path_info,
946 &dev_attr_sas_address,
947 NULL,
948};
949
950static struct device_attribute *hpsa_shost_attrs[] = {
951 &dev_attr_rescan,
952 &dev_attr_firmware_revision,
953 &dev_attr_commands_outstanding,
954 &dev_attr_transport_mode,
955 &dev_attr_resettable,
956 &dev_attr_hp_ssd_smart_path_status,
957 &dev_attr_raid_offload_debug,
958 &dev_attr_lockup_detected,
959 &dev_attr_ctlr_num,
960 &dev_attr_legacy_board,
961 NULL,
962};
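
/*
 * The host attributes above appear under /sys/class/scsi_host/host<n>/.
 * Illustrative usage (paths depend on the host number assigned at probe):
 *
 *	echo 1 > /sys/class/scsi_host/host0/rescan
 *	cat /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 *
 * The first triggers a rescan of the controller; the second reports whether
 * ioaccel ("HP SSD Smart Path") is currently enabled.
 */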
963
964#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
965 HPSA_MAX_CONCURRENT_PASSTHRUS)
966
967static struct scsi_host_template hpsa_driver_template = {
968 .module = THIS_MODULE,
969 .name = HPSA,
970 .proc_name = HPSA,
971 .queuecommand = hpsa_scsi_queue_command,
972 .scan_start = hpsa_scan_start,
973 .scan_finished = hpsa_scan_finished,
974 .change_queue_depth = hpsa_change_queue_depth,
975 .this_id = -1,
976 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
977 .ioctl = hpsa_ioctl,
978 .slave_alloc = hpsa_slave_alloc,
979 .slave_configure = hpsa_slave_configure,
980 .slave_destroy = hpsa_slave_destroy,
981#ifdef CONFIG_COMPAT
982 .compat_ioctl = hpsa_compat_ioctl,
983#endif
984 .sdev_attrs = hpsa_sdev_attrs,
985 .shost_attrs = hpsa_shost_attrs,
986 .max_sectors = 2048,
987 .no_write_same = 1,
988};
989
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}

	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
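
/*
 * Performant-mode submission: the low bit of c->busaddr flags the command
 * as performant-mode and the next bits select a block fetch table entry
 * (roughly, how much of the command the controller should prefetch for the
 * given scatter-gather count).  The reply-queue field steers the completion
 * to a particular reply queue / MSI-X vector.
 */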
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
	int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	cp->ReplyQueue = reply_queue;

	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
		IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	cp->reply_queue = reply_queue;

	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	cp->reply_queue = reply_queue;

	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
1123
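/*
 * A firmware flash can keep the controller legitimately unresponsive for a
 * long time, so the heartbeat sample interval used by the lockup detector
 * is stretched to 240 seconds while a flash command is outstanding and
 * restored to 30 seconds once it completes.
 */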
1129#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1130#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1131#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1132static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1133 struct CommandList *c)
1134{
1135 if (!is_firmware_flash_cmd(c->Request.CDB))
1136 return;
1137 atomic_inc(&h->firmware_flash_in_progress);
1138 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1139}
1140
1141static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1142 struct CommandList *c)
1143{
1144 if (is_firmware_flash_cmd(c->Request.CDB) &&
1145 atomic_dec_and_test(&h->firmware_flash_in_progress))
1146 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1147}
1148
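/*
 * Submit a command to the controller.  Note that the caller-supplied
 * reply_queue is overridden: completions are steered to the reply queue
 * mapped to the submitting CPU via h->reply_map[].
 */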
1149static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1150 struct CommandList *c, int reply_queue)
1151{
1152 dial_down_lockup_detection_during_fw_flash(h, c);
1153 atomic_inc(&h->commands_outstanding);
1154 if (c->device)
1155 atomic_inc(&c->device->commands_outstanding);
1156
1157 reply_queue = h->reply_map[raw_smp_processor_id()];
1158 switch (c->cmd_type) {
1159 case CMD_IOACCEL1:
1160 set_ioaccel1_performant_mode(h, c, reply_queue);
1161 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1162 break;
1163 case CMD_IOACCEL2:
1164 set_ioaccel2_performant_mode(h, c, reply_queue);
1165 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1166 break;
1167 case IOACCEL2_TMF:
1168 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1169 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1170 break;
1171 default:
1172 set_performant_mode(h, c, reply_queue);
1173 h->access.submit_command(h, c);
1174 }
1175}
1176
1177static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1178{
1179 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1180}
1181
1182static inline int is_hba_lunid(unsigned char scsi3addr[])
1183{
1184 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1185}
1186
1187static inline int is_scsi_rev_5(struct ctlr_info *h)
1188{
1189 if (!h->hba_inquiry_data)
1190 return 0;
1191 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1192 return 1;
1193 return 0;
1194}
1195
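/*
 * Pick an unused target id (and LUN 0) on the given bus for a device that
 * doesn't already have one.  Returns 0 on success, nonzero if every target
 * id on the bus is already taken.
 */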
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
1221
1222static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1223 struct hpsa_scsi_dev_t *dev, char *description)
1224{
1225#define LABEL_SIZE 25
1226 char label[LABEL_SIZE];
1227
1228 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1229 return;
1230
1231 switch (dev->devtype) {
1232 case TYPE_RAID:
1233 snprintf(label, LABEL_SIZE, "controller");
1234 break;
1235 case TYPE_ENCLOSURE:
1236 snprintf(label, LABEL_SIZE, "enclosure");
1237 break;
1238 case TYPE_DISK:
1239 case TYPE_ZBC:
1240 if (dev->external)
1241 snprintf(label, LABEL_SIZE, "external");
1242 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1243 snprintf(label, LABEL_SIZE, "%s",
1244 raid_label[PHYSICAL_DRIVE]);
1245 else
1246 snprintf(label, LABEL_SIZE, "RAID-%s",
1247 dev->raid_level > RAID_UNKNOWN ? "?" :
1248 raid_label[dev->raid_level]);
1249 break;
1250 case TYPE_ROM:
1251 snprintf(label, LABEL_SIZE, "rom");
1252 break;
1253 case TYPE_TAPE:
1254 snprintf(label, LABEL_SIZE, "tape");
1255 break;
1256 case TYPE_MEDIUM_CHANGER:
1257 snprintf(label, LABEL_SIZE, "changer");
1258 break;
1259 default:
1260 snprintf(label, LABEL_SIZE, "UNKNOWN");
1261 break;
1262 }
1263
1264 dev_printk(level, &h->pdev->dev,
1265 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1266 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1267 description,
1268 scsi_device_type(dev->devtype),
1269 dev->vendor,
1270 dev->model,
1271 label,
1272 dev->offload_config ? '+' : '-',
1273 dev->offload_to_be_enabled ? '+' : '-',
1274 dev->expose_device);
1275}
1276
1277
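/*
 * Add a device to h->dev[], assigning a target/lun if it doesn't already
 * have one, and record it in added[] so the caller can register it with
 * the SCSI midlayer afterwards.
 */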
1278static int hpsa_scsi_add_entry(struct ctlr_info *h,
1279 struct hpsa_scsi_dev_t *device,
1280 struct hpsa_scsi_dev_t *added[], int *nadded)
1281{
1282
1283 int n = h->ndevices;
1284 int i;
1285 unsigned char addr1[8], addr2[8];
1286 struct hpsa_scsi_dev_t *sd;
1287
1288 if (n >= HPSA_MAX_DEVICES) {
1289 dev_err(&h->pdev->dev, "too many devices, some will be "
1290 "inaccessible.\n");
1291 return -1;
1292 }
1293
1294
1295 if (device->lun != -1)
1296
1297 goto lun_assigned;
1303 if (device->scsi3addr[4] == 0) {
1304
1305 if (hpsa_find_target_lun(h, device->scsi3addr,
1306 device->bus, &device->target, &device->lun) != 0)
1307 return -1;
1308 goto lun_assigned;
1309 }
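	/*
	 * This is a non-LUN-0 physical device: find an existing device whose
	 * address matches except for bytes 4 and 5, inherit its bus/target,
	 * and use address byte 4 as the LUN.
	 */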
1317 memcpy(addr1, device->scsi3addr, 8);
1318 addr1[4] = 0;
1319 addr1[5] = 0;
1320 for (i = 0; i < n; i++) {
1321 sd = h->dev[i];
1322 memcpy(addr2, sd->scsi3addr, 8);
1323 addr2[4] = 0;
1324 addr2[5] = 0;
1325
1326 if (memcmp(addr1, addr2, 8) == 0) {
1327 device->bus = sd->bus;
1328 device->target = sd->target;
1329 device->lun = device->scsi3addr[4];
1330 break;
1331 }
1332 }
1333 if (device->lun == -1) {
1334 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1335 " suspect firmware bug or unsupported hardware "
1336 "configuration.\n");
1337 return -1;
1338 }
1339
1340lun_assigned:
1341
1342 h->dev[n] = device;
1343 h->ndevices++;
1344 added[*nadded] = device;
1345 (*nadded)++;
1346 hpsa_show_dev_msg(KERN_INFO, h, device,
1347 device->expose_device ? "added" : "masked");
1348 return 0;
1349}
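
/*
 * Refresh an existing h->dev[] entry in place with the attributes found by
 * a rescan (raid level, ioaccel handle and raid map, queue depth, offload
 * state).
 */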
1356static void hpsa_scsi_update_entry(struct ctlr_info *h,
1357 int entry, struct hpsa_scsi_dev_t *new_entry)
1358{
1359
1360 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1361
1362
1363 h->dev[entry]->raid_level = new_entry->raid_level;
1368 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1369
1370
1371 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
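		/*
		 * Offload is (or is about to be) enabled: take the new raid
		 * map and ioaccel handle together so they stay consistent.
		 */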
1380 h->dev[entry]->raid_map = new_entry->raid_map;
1381 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1382 }
1383 if (new_entry->offload_to_be_enabled) {
1384 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1385 wmb();
1386 }
1387 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1388 h->dev[entry]->offload_config = new_entry->offload_config;
1389 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1390 h->dev[entry]->queue_depth = new_entry->queue_depth;
1397 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1402 if (!new_entry->offload_to_be_enabled)
1403 h->dev[entry]->offload_enabled = 0;
1404
1405 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1406}
1407
1408
1409static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1410 int entry, struct hpsa_scsi_dev_t *new_entry,
1411 struct hpsa_scsi_dev_t *added[], int *nadded,
1412 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1413{
1414
1415 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1416 removed[*nremoved] = h->dev[entry];
1417 (*nremoved)++;
1418
1419
1420
1421
1422
1423 if (new_entry->target == -1) {
1424 new_entry->target = h->dev[entry]->target;
1425 new_entry->lun = h->dev[entry]->lun;
1426 }
1427
1428 h->dev[entry] = new_entry;
1429 added[*nadded] = new_entry;
1430 (*nadded)++;
1431
1432 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1433}
1434
1435
1436static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1437 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1438{
1439
1440 int i;
1441 struct hpsa_scsi_dev_t *sd;
1442
1443 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1444
1445 sd = h->dev[entry];
1446 removed[*nremoved] = h->dev[entry];
1447 (*nremoved)++;
1448
1449 for (i = entry; i < h->ndevices-1; i++)
1450 h->dev[i] = h->dev[i+1];
1451 h->ndevices--;
1452 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1453}
1454
1455#define SCSI3ADDR_EQ(a, b) ( \
1456 (a)[7] == (b)[7] && \
1457 (a)[6] == (b)[6] && \
1458 (a)[5] == (b)[5] && \
1459 (a)[4] == (b)[4] && \
1460 (a)[3] == (b)[3] && \
1461 (a)[2] == (b)[2] && \
1462 (a)[1] == (b)[1] && \
1463 (a)[0] == (b)[0])
1464
1465static void fixup_botched_add(struct ctlr_info *h,
1466 struct hpsa_scsi_dev_t *added)
1467{
1468
1469
1470
1471 unsigned long flags;
1472 int i, j;
1473
1474 spin_lock_irqsave(&h->lock, flags);
1475 for (i = 0; i < h->ndevices; i++) {
1476 if (h->dev[i] == added) {
1477 for (j = i; j < h->ndevices-1; j++)
1478 h->dev[j] = h->dev[j+1];
1479 h->ndevices--;
1480 break;
1481 }
1482 }
1483 spin_unlock_irqrestore(&h->lock, flags);
1484 kfree(added);
1485}
1486
1487static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1488 struct hpsa_scsi_dev_t *dev2)
1489{
1490
1491
1492
1493
1494 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1495 sizeof(dev1->scsi3addr)) != 0)
1496 return 0;
1497 if (memcmp(dev1->device_id, dev2->device_id,
1498 sizeof(dev1->device_id)) != 0)
1499 return 0;
1500 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1501 return 0;
1502 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1503 return 0;
1504 if (dev1->devtype != dev2->devtype)
1505 return 0;
1506 if (dev1->bus != dev2->bus)
1507 return 0;
1508 return 1;
1509}
1510
1511static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1512 struct hpsa_scsi_dev_t *dev2)
1513{
1514
1515
1516
1517
1518 if (dev1->raid_level != dev2->raid_level)
1519 return 1;
1520 if (dev1->offload_config != dev2->offload_config)
1521 return 1;
1522 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1523 return 1;
1524 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1525 if (dev1->queue_depth != dev2->queue_depth)
1526 return 1;
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1537 return 1;
1538 return 0;
1539}
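
/*
 * Look for 'needle' in 'haystack' by SCSI-3 address.  On a match, *index is
 * set to the slot and the return value says whether the device there is the
 * same, the same but with updated attributes, or a different device at the
 * same address.  Returns DEVICE_NOT_FOUND when no slot matches (or when the
 * changed device is an offline volume).
 */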
1549static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1550 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1551 int *index)
1552{
1553 int i;
1554#define DEVICE_NOT_FOUND 0
1555#define DEVICE_CHANGED 1
1556#define DEVICE_SAME 2
1557#define DEVICE_UPDATED 3
1558 if (needle == NULL)
1559 return DEVICE_NOT_FOUND;
1560
1561 for (i = 0; i < haystack_size; i++) {
1562 if (haystack[i] == NULL)
1563 continue;
1564 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1565 *index = i;
1566 if (device_is_the_same(needle, haystack[i])) {
1567 if (device_updated(needle, haystack[i]))
1568 return DEVICE_UPDATED;
1569 return DEVICE_SAME;
1570 } else {
1571
1572 if (needle->volume_offline)
1573 return DEVICE_NOT_FOUND;
1574 return DEVICE_CHANGED;
1575 }
1576 }
1577 }
1578 *index = -1;
1579 return DEVICE_NOT_FOUND;
1580}
1581
1582static void hpsa_monitor_offline_device(struct ctlr_info *h,
1583 unsigned char scsi3addr[])
1584{
1585 struct offline_device_entry *device;
1586 unsigned long flags;
1587
1588
1589 spin_lock_irqsave(&h->offline_device_lock, flags);
1590 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1591 if (memcmp(device->scsi3addr, scsi3addr,
1592 sizeof(device->scsi3addr)) == 0) {
1593 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1594 return;
1595 }
1596 }
1597 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1598
1599
1600 device = kmalloc(sizeof(*device), GFP_KERNEL);
1601 if (!device)
1602 return;
1603
1604 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1605 spin_lock_irqsave(&h->offline_device_lock, flags);
1606 list_add_tail(&device->offline_list, &h->offline_device_list);
1607 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1608}
1609
1610
1611static void hpsa_show_volume_status(struct ctlr_info *h,
1612 struct hpsa_scsi_dev_t *sd)
1613{
1614 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1615 dev_info(&h->pdev->dev,
1616 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1617 h->scsi_host->host_no,
1618 sd->bus, sd->target, sd->lun);
1619 switch (sd->volume_offline) {
1620 case HPSA_LV_OK:
1621 break;
1622 case HPSA_LV_UNDERGOING_ERASE:
1623 dev_info(&h->pdev->dev,
1624 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1625 h->scsi_host->host_no,
1626 sd->bus, sd->target, sd->lun);
1627 break;
1628 case HPSA_LV_NOT_AVAILABLE:
1629 dev_info(&h->pdev->dev,
1630 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1631 h->scsi_host->host_no,
1632 sd->bus, sd->target, sd->lun);
1633 break;
1634 case HPSA_LV_UNDERGOING_RPI:
1635 dev_info(&h->pdev->dev,
1636 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1637 h->scsi_host->host_no,
1638 sd->bus, sd->target, sd->lun);
1639 break;
1640 case HPSA_LV_PENDING_RPI:
1641 dev_info(&h->pdev->dev,
1642 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1643 h->scsi_host->host_no,
1644 sd->bus, sd->target, sd->lun);
1645 break;
1646 case HPSA_LV_ENCRYPTED_NO_KEY:
1647 dev_info(&h->pdev->dev,
1648 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1649 h->scsi_host->host_no,
1650 sd->bus, sd->target, sd->lun);
1651 break;
1652 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1653 dev_info(&h->pdev->dev,
1654 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1655 h->scsi_host->host_no,
1656 sd->bus, sd->target, sd->lun);
1657 break;
1658 case HPSA_LV_UNDERGOING_ENCRYPTION:
1659 dev_info(&h->pdev->dev,
1660 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1661 h->scsi_host->host_no,
1662 sd->bus, sd->target, sd->lun);
1663 break;
1664 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1665 dev_info(&h->pdev->dev,
1666 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1667 h->scsi_host->host_no,
1668 sd->bus, sd->target, sd->lun);
1669 break;
1670 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1671 dev_info(&h->pdev->dev,
1672 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1673 h->scsi_host->host_no,
1674 sd->bus, sd->target, sd->lun);
1675 break;
1676 case HPSA_LV_PENDING_ENCRYPTION:
1677 dev_info(&h->pdev->dev,
1678 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1679 h->scsi_host->host_no,
1680 sd->bus, sd->target, sd->lun);
1681 break;
1682 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1683 dev_info(&h->pdev->dev,
1684 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1685 h->scsi_host->host_no,
1686 sd->bus, sd->target, sd->lun);
1687 break;
1688 }
1689}
1690
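/*
 * Fill in logical_drive->phys_disk[] from the volume's RAID map by matching
 * each map entry's ioaccel handle against the known physical disks.  The
 * volume's queue depth becomes the sum of its members' queue depths, capped
 * at h->nr_cmds; if any member is missing, ioaccel offload is turned off
 * for the volume.
 */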
1695static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1696 struct hpsa_scsi_dev_t *dev[], int ndevices,
1697 struct hpsa_scsi_dev_t *logical_drive)
1698{
1699 struct raid_map_data *map = &logical_drive->raid_map;
1700 struct raid_map_disk_data *dd = &map->data[0];
1701 int i, j;
1702 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1703 le16_to_cpu(map->metadata_disks_per_row);
1704 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1705 le16_to_cpu(map->layout_map_count) *
1706 total_disks_per_row;
1707 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1708 total_disks_per_row;
1709 int qdepth;
1710
1711 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1712 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1713
1714 logical_drive->nphysical_disks = nraid_map_entries;
1715
1716 qdepth = 0;
1717 for (i = 0; i < nraid_map_entries; i++) {
1718 logical_drive->phys_disk[i] = NULL;
1719 if (!logical_drive->offload_config)
1720 continue;
1721 for (j = 0; j < ndevices; j++) {
1722 if (dev[j] == NULL)
1723 continue;
1724 if (dev[j]->devtype != TYPE_DISK &&
1725 dev[j]->devtype != TYPE_ZBC)
1726 continue;
1727 if (is_logical_device(dev[j]))
1728 continue;
1729 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1730 continue;
1731
1732 logical_drive->phys_disk[i] = dev[j];
1733 if (i < nphys_disk)
1734 qdepth = min(h->nr_cmds, qdepth +
1735 logical_drive->phys_disk[i]->queue_depth);
1736 break;
1737 }
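		/*
		 * A member physical drive wasn't found (for example it was
		 * removed and the volume is running degraded); fall back to
		 * non-ioaccel I/O for this volume.
		 */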
1746 if (!logical_drive->phys_disk[i]) {
1747 dev_warn(&h->pdev->dev,
1748 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1749 __func__,
1750 h->scsi_host->host_no, logical_drive->bus,
1751 logical_drive->target, logical_drive->lun);
1752 hpsa_turn_off_ioaccel_for_device(logical_drive);
1753 logical_drive->queue_depth = 8;
1754 }
1755 }
1756 if (nraid_map_entries)
1761 logical_drive->queue_depth = qdepth;
1762 else {
1763 if (logical_drive->external)
1764 logical_drive->queue_depth = EXTERNAL_QD;
1765 else
1766 logical_drive->queue_depth = h->nr_cmds;
1767 }
1768}
1769
1770static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1771 struct hpsa_scsi_dev_t *dev[], int ndevices)
1772{
1773 int i;
1774
1775 for (i = 0; i < ndevices; i++) {
1776 if (dev[i] == NULL)
1777 continue;
1778 if (dev[i]->devtype != TYPE_DISK &&
1779 dev[i]->devtype != TYPE_ZBC)
1780 continue;
1781 if (!is_logical_device(dev[i]))
1782 continue;
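		/*
		 * Only (re)build phys_disk[] for volumes whose ioaccel
		 * offload is about to be enabled; volumes already running
		 * with offload keep their existing pointers.
		 */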
1803 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1804 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1805 }
1806}
1807
1808static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1809{
1810 int rc = 0;
1811
1812 if (!h->scsi_host)
1813 return 1;
1814
1815 if (is_logical_device(device))
1816 rc = scsi_add_device(h->scsi_host, device->bus,
1817 device->target, device->lun);
1818 else
1819 rc = hpsa_add_sas_device(h->sas_host, device);
1820
1821 return rc;
1822}
1823
1824static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1825 struct hpsa_scsi_dev_t *dev)
1826{
1827 int i;
1828 int count = 0;
1829
1830 for (i = 0; i < h->nr_cmds; i++) {
1831 struct CommandList *c = h->cmd_pool + i;
1832 int refcount = atomic_inc_return(&c->refcount);
1833
1834 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1835 dev->scsi3addr)) {
1836 unsigned long flags;
1837
1838 spin_lock_irqsave(&h->lock, flags);
1839 if (!hpsa_is_cmd_idle(c))
1840 ++count;
1841 spin_unlock_irqrestore(&h->lock, flags);
1842 }
1843
1844 cmd_free(h, c);
1845 }
1846
1847 return count;
1848}
1849
1850#define NUM_WAIT 20
1851static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1852 struct hpsa_scsi_dev_t *device)
1853{
1854 int cmds = 0;
1855 int waits = 0;
1856 int num_wait = NUM_WAIT;
1857
1858 if (device->external)
1859 num_wait = HPSA_EH_PTRAID_TIMEOUT;
1860
1861 while (1) {
1862 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1863 if (cmds == 0)
1864 break;
1865 if (++waits > num_wait)
1866 break;
1867 msleep(1000);
1868 }
1869
1870 if (waits > num_wait) {
1871 dev_warn(&h->pdev->dev,
1872 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1873 __func__,
1874 h->scsi_host->host_no,
1875 device->bus, device->target, device->lun, cmds);
1876 }
1877}
1878
1879static void hpsa_remove_device(struct ctlr_info *h,
1880 struct hpsa_scsi_dev_t *device)
1881{
1882 struct scsi_device *sdev = NULL;
1883
1884 if (!h->scsi_host)
1885 return;
1886
1887
1888
1889
1890 device->removed = 1;
1891 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1892
1893 if (is_logical_device(device)) {
1894 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1895 device->target, device->lun);
1896 if (sdev) {
1897 scsi_remove_device(sdev);
1898 scsi_device_put(sdev);
1899 } else {
1900
1901
1902
1903
1904
1905 hpsa_show_dev_msg(KERN_WARNING, h, device,
1906 "didn't find device for removal.");
1907 }
1908 } else {
1909
1910 hpsa_remove_sas_device(device);
1911 }
1912}
1913
1914static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1915 struct hpsa_scsi_dev_t *sd[], int nsds)
1916{
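	/*
	 * Reconcile the driver's device table h->dev[] with sd[], the set of
	 * devices found by the latest rescan: remove devices that vanished,
	 * update ones that changed, and add new ones.
	 */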
1921 int i, entry, device_change, changes = 0;
1922 struct hpsa_scsi_dev_t *csd;
1923 unsigned long flags;
1924 struct hpsa_scsi_dev_t **added, **removed;
1925 int nadded, nremoved;
1926
1931 spin_lock_irqsave(&h->reset_lock, flags);
1932 if (h->reset_in_progress) {
1933 h->drv_req_rescan = 1;
1934 spin_unlock_irqrestore(&h->reset_lock, flags);
1935 return;
1936 }
1937 spin_unlock_irqrestore(&h->reset_lock, flags);
1938
1939 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1940 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1941
1942 if (!added || !removed) {
1943 dev_warn(&h->pdev->dev, "out of memory in "
1944 "adjust_hpsa_scsi_table\n");
1945 goto free_and_out;
1946 }
1947
1948 spin_lock_irqsave(&h->devlock, flags);
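
	/*
	 * First pass: walk the existing table and remove, replace or update
	 * entries according to what the rescan reported.
	 */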
1957 i = 0;
1958 nremoved = 0;
1959 nadded = 0;
1960 while (i < h->ndevices) {
1961 csd = h->dev[i];
1962 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1963 if (device_change == DEVICE_NOT_FOUND) {
1964 changes++;
1965 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1966 continue;
1967 } else if (device_change == DEVICE_CHANGED) {
1968 changes++;
1969 hpsa_scsi_replace_entry(h, i, sd[entry],
1970 added, &nadded, removed, &nremoved);
1971
1972
1973
1974 sd[entry] = NULL;
1975 } else if (device_change == DEVICE_UPDATED) {
1976 hpsa_scsi_update_entry(h, i, sd[entry]);
1977 }
1978 i++;
1979 }
1980
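	/*
	 * Second pass: anything left in sd[] that isn't already in h->dev[]
	 * is new; add it, except for volumes reported offline, which are
	 * only logged and monitored.
	 */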
1985 for (i = 0; i < nsds; i++) {
1986 if (!sd[i])
1987 continue;
1994 if (sd[i]->volume_offline) {
1995 hpsa_show_volume_status(h, sd[i]);
1996 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1997 continue;
1998 }
1999
2000 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
2001 h->ndevices, &entry);
2002 if (device_change == DEVICE_NOT_FOUND) {
2003 changes++;
2004 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
2005 break;
2006 sd[i] = NULL;
2007 } else if (device_change == DEVICE_CHANGED) {
2008			/* should never happen... */
2009			changes++;
2010			dev_warn(&h->pdev->dev,
2011				"device unexpectedly changed.\n");
2012			/* but if it does happen, we just ignore that device */
2013 }
2014 }
2015 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2016
2017	/*
2018	 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2019	 * any logical drives that need it enabled.
2020	 *
2021	 * The raid map should be current by now.
2022	 *
2023	 * We are updating the device list used for I/O requests.
2024	 */
2025 for (i = 0; i < h->ndevices; i++) {
2026 if (h->dev[i] == NULL)
2027 continue;
2028 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2029 }
2030
2031 spin_unlock_irqrestore(&h->devlock, flags);
2032
2033	/* Monitor devices which are in one of several NOT READY states to be
2034	 * brought online later. This must be done without holding h->devlock,
2035	 * so don't touch h->dev[].
2036	 */
2037 for (i = 0; i < nsds; i++) {
2038 if (!sd[i])
2039 continue;
2040 if (sd[i]->volume_offline)
2041 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2042 }
2043
2044	/* Don't notify scsi mid layer of any changes the first time through
2045	 * (or if there are no changes); scsi_scan_host will do it later the
2046	 * first time through.
2047	 */
2048 if (!changes)
2049 goto free_and_out;
2050
2051
2052 for (i = 0; i < nremoved; i++) {
2053 if (removed[i] == NULL)
2054 continue;
2055 if (removed[i]->expose_device)
2056 hpsa_remove_device(h, removed[i]);
2057 kfree(removed[i]);
2058 removed[i] = NULL;
2059 }
2060
2061
2062 for (i = 0; i < nadded; i++) {
2063 int rc = 0;
2064
2065 if (added[i] == NULL)
2066 continue;
2067 if (!(added[i]->expose_device))
2068 continue;
2069 rc = hpsa_add_device(h, added[i]);
2070 if (!rc)
2071 continue;
2072		dev_warn(&h->pdev->dev,
2073			"addition failed %d, device not added.\n", rc);
2074		/* now we have to remove it from h->dev,
2075		 * since it didn't get added to scsi mid layer
2076		 */
2077 fixup_botched_add(h, added[i]);
2078 h->drv_req_rescan = 1;
2079 }
2080
2081free_and_out:
2082 kfree(added);
2083 kfree(removed);
2084}
2085
2086
2087
2088
2089
2090static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2091 int bus, int target, int lun)
2092{
2093 int i;
2094 struct hpsa_scsi_dev_t *sd;
2095
2096 for (i = 0; i < h->ndevices; i++) {
2097 sd = h->dev[i];
2098 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2099 return sd;
2100 }
2101 return NULL;
2102}
2103
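/*
 * Called by the SCSI midlayer for each new scsi_device: find the matching
 * hpsa_scsi_dev_t (by SAS rphy for physical devices, by bus/target/lun
 * otherwise) and, if it is exposed, stash it in sdev->hostdata.
 */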
2104static int hpsa_slave_alloc(struct scsi_device *sdev)
2105{
2106 struct hpsa_scsi_dev_t *sd = NULL;
2107 unsigned long flags;
2108 struct ctlr_info *h;
2109
2110 h = sdev_to_hba(sdev);
2111 spin_lock_irqsave(&h->devlock, flags);
2112 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2113 struct scsi_target *starget;
2114 struct sas_rphy *rphy;
2115
2116 starget = scsi_target(sdev);
2117 rphy = target_to_rphy(starget);
2118 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2119 if (sd) {
2120 sd->target = sdev_id(sdev);
2121 sd->lun = sdev->lun;
2122 }
2123 }
2124 if (!sd)
2125 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2126 sdev_id(sdev), sdev->lun);
2127
2128 if (sd && sd->expose_device) {
2129 atomic_set(&sd->ioaccel_cmds_out, 0);
2130 sdev->hostdata = sd;
2131 } else
2132 sdev->hostdata = NULL;
2133 spin_unlock_irqrestore(&h->devlock, flags);
2134 return 0;
2135}
2136
2137
2138#define CTLR_TIMEOUT (120 * HZ)
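/*
 * Set the queue depth and error-handling timeouts for a scsi_device based
 * on the matching hpsa device entry; external arrays and the controller
 * LUN itself get longer request timeouts.
 */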
2139static int hpsa_slave_configure(struct scsi_device *sdev)
2140{
2141 struct hpsa_scsi_dev_t *sd;
2142 int queue_depth;
2143
2144 sd = sdev->hostdata;
2145 sdev->no_uld_attach = !sd || !sd->expose_device;
2146
2147 if (sd) {
2148 sd->was_removed = 0;
2149 queue_depth = sd->queue_depth != 0 ?
2150 sd->queue_depth : sdev->host->can_queue;
2151 if (sd->external) {
2152 queue_depth = EXTERNAL_QD;
2153 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2154 blk_queue_rq_timeout(sdev->request_queue,
2155 HPSA_EH_PTRAID_TIMEOUT);
2156 }
2157 if (is_hba_lunid(sd->scsi3addr)) {
2158 sdev->eh_timeout = CTLR_TIMEOUT;
2159 blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
2160 }
2161 } else {
2162 queue_depth = sdev->host->can_queue;
2163 }
2164
2165 scsi_change_queue_depth(sdev, queue_depth);
2166
2167 return 0;
2168}
2169
2170static void hpsa_slave_destroy(struct scsi_device *sdev)
2171{
2172 struct hpsa_scsi_dev_t *hdev = NULL;
2173
2174 hdev = sdev->hostdata;
2175
2176 if (hdev)
2177 hdev->was_removed = 1;
2178}
2179
2180static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2181{
2182 int i;
2183
2184 if (!h->ioaccel2_cmd_sg_list)
2185 return;
2186 for (i = 0; i < h->nr_cmds; i++) {
2187 kfree(h->ioaccel2_cmd_sg_list[i]);
2188 h->ioaccel2_cmd_sg_list[i] = NULL;
2189 }
2190 kfree(h->ioaccel2_cmd_sg_list);
2191 h->ioaccel2_cmd_sg_list = NULL;
2192}
2193
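/*
 * Allocate one ioaccel2 SG chain block per command (maxsgentries elements
 * each) so that requests whose scatter-gather lists overflow the ioaccel2
 * command have somewhere to spill.
 */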
2194static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2195{
2196 int i;
2197
2198 if (h->chainsize <= 0)
2199 return 0;
2200
2201 h->ioaccel2_cmd_sg_list =
2202 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2203 GFP_KERNEL);
2204 if (!h->ioaccel2_cmd_sg_list)
2205 return -ENOMEM;
2206 for (i = 0; i < h->nr_cmds; i++) {
2207 h->ioaccel2_cmd_sg_list[i] =
2208 kmalloc_array(h->maxsgentries,
2209 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2210 GFP_KERNEL);
2211 if (!h->ioaccel2_cmd_sg_list[i])
2212 goto clean;
2213 }
2214 return 0;
2215
2216clean:
2217 hpsa_free_ioaccel2_sg_chain_blocks(h);
2218 return -ENOMEM;
2219}
2220
2221static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2222{
2223 int i;
2224
2225 if (!h->cmd_sg_list)
2226 return;
2227 for (i = 0; i < h->nr_cmds; i++) {
2228 kfree(h->cmd_sg_list[i]);
2229 h->cmd_sg_list[i] = NULL;
2230 }
2231 kfree(h->cmd_sg_list);
2232 h->cmd_sg_list = NULL;
2233}
2234
2235static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2236{
2237 int i;
2238
2239 if (h->chainsize <= 0)
2240 return 0;
2241
2242 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2243 GFP_KERNEL);
2244 if (!h->cmd_sg_list)
2245 return -ENOMEM;
2246
2247 for (i = 0; i < h->nr_cmds; i++) {
2248 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2249 sizeof(*h->cmd_sg_list[i]),
2250 GFP_KERNEL);
2251 if (!h->cmd_sg_list[i])
2252 goto clean;
2253
2254 }
2255 return 0;
2256
2257clean:
2258 hpsa_free_sg_chain_blocks(h);
2259 return -ENOMEM;
2260}
2261
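/*
 * DMA-map the per-command ioaccel2 SG chain block and store its bus
 * address in the command's first SG element; returns -1 on mapping failure.
 */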
2262static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2263 struct io_accel2_cmd *cp, struct CommandList *c)
2264{
2265 struct ioaccel2_sg_element *chain_block;
2266 u64 temp64;
2267 u32 chain_size;
2268
2269 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2270 chain_size = le32_to_cpu(cp->sg[0].length);
2271 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2272 DMA_TO_DEVICE);
2273 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2274
2275 cp->sg->address = 0;
2276 return -1;
2277 }
2278 cp->sg->address = cpu_to_le64(temp64);
2279 return 0;
2280}
2281
2282static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2283 struct io_accel2_cmd *cp)
2284{
2285 struct ioaccel2_sg_element *chain_sg;
2286 u64 temp64;
2287 u32 chain_size;
2288
2289 chain_sg = cp->sg;
2290 temp64 = le64_to_cpu(chain_sg->address);
2291 chain_size = le32_to_cpu(cp->sg[0].length);
2292 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2293}
2294
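/*
 * DMA-map the spill-over SG descriptors for a command and turn the last
 * embedded SG entry into a chain pointer referencing them.
 */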
2295static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2296 struct CommandList *c)
2297{
2298 struct SGDescriptor *chain_sg, *chain_block;
2299 u64 temp64;
2300 u32 chain_len;
2301
2302 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2303 chain_block = h->cmd_sg_list[c->cmdindex];
2304 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2305 chain_len = sizeof(*chain_sg) *
2306 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2307 chain_sg->Len = cpu_to_le32(chain_len);
2308 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2309 DMA_TO_DEVICE);
2310 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2311
2312 chain_sg->Addr = cpu_to_le64(0);
2313 return -1;
2314 }
2315 chain_sg->Addr = cpu_to_le64(temp64);
2316 return 0;
2317}
2318
2319static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2320 struct CommandList *c)
2321{
2322 struct SGDescriptor *chain_sg;
2323
2324 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2325 return;
2326
2327 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2328 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2329 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2330}
2331
2332/*
2333 * Decode the various types of errors on ioaccel2 path.
2334 * Return 1 for any error that should generate a RAID path retry.
2335 * Return 0 for errors that don't require a retry.
2336 */
2337static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2338 struct CommandList *c,
2339 struct scsi_cmnd *cmd,
2340 struct io_accel2_cmd *c2,
2341 struct hpsa_scsi_dev_t *dev)
2342{
2343 int data_len;
2344 int retry = 0;
2345 u32 ioaccel2_resid = 0;
2346
2347 switch (c2->error_data.serv_response) {
2348 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2349 switch (c2->error_data.status) {
2350 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2351 if (cmd)
2352 cmd->result = 0;
2353 break;
2354 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2355 cmd->result |= SAM_STAT_CHECK_CONDITION;
2356 if (c2->error_data.data_present !=
2357 IOACCEL2_SENSE_DATA_PRESENT) {
2358 memset(cmd->sense_buffer, 0,
2359 SCSI_SENSE_BUFFERSIZE);
2360 break;
2361 }
2362
2363 data_len = c2->error_data.sense_data_len;
2364 if (data_len > SCSI_SENSE_BUFFERSIZE)
2365 data_len = SCSI_SENSE_BUFFERSIZE;
2366 if (data_len > sizeof(c2->error_data.sense_data_buff))
2367 data_len =
2368 sizeof(c2->error_data.sense_data_buff);
2369 memcpy(cmd->sense_buffer,
2370 c2->error_data.sense_data_buff, data_len);
2371 retry = 1;
2372 break;
2373 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2374 retry = 1;
2375 break;
2376 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2377 retry = 1;
2378 break;
2379 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2380 retry = 1;
2381 break;
2382 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2383 retry = 1;
2384 break;
2385 default:
2386 retry = 1;
2387 break;
2388 }
2389 break;
2390 case IOACCEL2_SERV_RESPONSE_FAILURE:
2391 switch (c2->error_data.status) {
2392 case IOACCEL2_STATUS_SR_IO_ERROR:
2393 case IOACCEL2_STATUS_SR_IO_ABORTED:
2394 case IOACCEL2_STATUS_SR_OVERRUN:
2395 retry = 1;
2396 break;
2397 case IOACCEL2_STATUS_SR_UNDERRUN:
2398 cmd->result = (DID_OK << 16);
2399 cmd->result |= (COMMAND_COMPLETE << 8);
2400 ioaccel2_resid = get_unaligned_le32(
2401 &c2->error_data.resid_cnt[0]);
2402 scsi_set_resid(cmd, ioaccel2_resid);
2403 break;
2404 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2405 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2406 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2407
2408
2409
2410
2411
2412
2413
2414
2415 if (dev->physical_device && dev->expose_device) {
2416 cmd->result = DID_NO_CONNECT << 16;
2417 dev->removed = 1;
2418 h->drv_req_rescan = 1;
2419 dev_warn(&h->pdev->dev,
2420 "%s: device is gone!\n", __func__);
2421 } else
2422
2423
2424
2425
2426
2427 retry = 1;
2428 break;
2429 default:
2430 retry = 1;
2431 }
2432 break;
2433 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2434 break;
2435 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2436 break;
2437 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2438 retry = 1;
2439 break;
2440 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2441 break;
2442 default:
2443 retry = 1;
2444 break;
2445 }
2446
2447 if (dev->in_reset)
2448 retry = 0;
2449
2450 return retry;
2451}
2452
2453static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2454 struct CommandList *c)
2455{
2456 struct hpsa_scsi_dev_t *dev = c->device;
2457
2458	/*
2459	 * Reset c->scsi_cmd here so that the reset handler will know
2460	 * this command has completed.  Then, check to see if the device's
2461	 * reset handler is waiting for this command, and, if so, wake it.
2462	 */
2463 c->scsi_cmd = SCSI_CMD_IDLE;
2464 mb();
2465 if (dev) {
2466 atomic_dec(&dev->commands_outstanding);
2467 if (dev->in_reset &&
2468 atomic_read(&dev->commands_outstanding) <= 0)
2469 wake_up_all(&h->event_sync_wait_queue);
2470 }
2471}
2472
2473static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2474 struct CommandList *c)
2475{
2476 hpsa_cmd_resolve_events(h, c);
2477 cmd_tagged_free(h, c);
2478}
2479
2480static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2481 struct CommandList *c, struct scsi_cmnd *cmd)
2482{
2483 hpsa_cmd_resolve_and_free(h, c);
2484 if (cmd && cmd->scsi_done)
2485 cmd->scsi_done(cmd);
2486}
2487
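/*
 * Resubmit a command through the driver's resubmit workqueue on the
 * current CPU (used to retry ioaccel requests down the normal RAID path).
 */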
2488static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2489{
2490 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2491 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2492}
2493
2494static void process_ioaccel2_completion(struct ctlr_info *h,
2495 struct CommandList *c, struct scsi_cmnd *cmd,
2496 struct hpsa_scsi_dev_t *dev)
2497{
2498 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2499
2500
2501 if (likely(c2->error_data.serv_response == 0 &&
2502 c2->error_data.status == 0)) {
2503 cmd->result = 0;
2504 return hpsa_cmd_free_and_done(h, c, cmd);
2505 }
2506
2507	/*
2508	 * Any RAID offload error results in retry which will use
2509	 * the normal I/O path so the controller can handle whatever is
2510	 * wrong.
2511	 */
2512 if (is_logical_device(dev) &&
2513 c2->error_data.serv_response ==
2514 IOACCEL2_SERV_RESPONSE_FAILURE) {
2515 if (c2->error_data.status ==
2516 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2517 hpsa_turn_off_ioaccel_for_device(dev);
2518 }
2519
2520 if (dev->in_reset) {
2521 cmd->result = DID_RESET << 16;
2522 return hpsa_cmd_free_and_done(h, c, cmd);
2523 }
2524
2525 return hpsa_retry_cmd(h, c);
2526 }
2527
2528 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2529 return hpsa_retry_cmd(h, c);
2530
2531 return hpsa_cmd_free_and_done(h, c, cmd);
2532}
2533
2534
2535static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2536 struct CommandList *cp)
2537{
2538 u8 tmf_status = cp->err_info->ScsiStatus;
2539
2540 switch (tmf_status) {
2541 case CISS_TMF_COMPLETE:
2542		/*
2543		 * CISS_TMF_COMPLETE never happens in practice;
2544		 * ei->CommandStatus == 0 covers that case instead.
2545		 */
2546 case CISS_TMF_SUCCESS:
2547 return 0;
2548 case CISS_TMF_INVALID_FRAME:
2549 case CISS_TMF_NOT_SUPPORTED:
2550 case CISS_TMF_FAILED:
2551 case CISS_TMF_WRONG_LUN:
2552 case CISS_TMF_OVERLAPPED_TAG:
2553 break;
2554 default:
2555 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2556 tmf_status);
2557 break;
2558 }
2559 return -tmf_status;
2560}
2561
2562static void complete_scsi_command(struct CommandList *cp)
2563{
2564 struct scsi_cmnd *cmd;
2565 struct ctlr_info *h;
2566 struct ErrorInfo *ei;
2567 struct hpsa_scsi_dev_t *dev;
2568 struct io_accel2_cmd *c2;
2569
2570 u8 sense_key;
2571 u8 asc;
2572 u8 ascq;
2573 unsigned long sense_data_size;
2574
2575 ei = cp->err_info;
2576 cmd = cp->scsi_cmd;
2577 h = cp->h;
2578
2579 if (!cmd->device) {
2580 cmd->result = DID_NO_CONNECT << 16;
2581 return hpsa_cmd_free_and_done(h, cp, cmd);
2582 }
2583
2584 dev = cmd->device->hostdata;
2585 if (!dev) {
2586 cmd->result = DID_NO_CONNECT << 16;
2587 return hpsa_cmd_free_and_done(h, cp, cmd);
2588 }
2589 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2590
2591 scsi_dma_unmap(cmd);
2592 if ((cp->cmd_type == CMD_SCSI) &&
2593 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2594 hpsa_unmap_sg_chain_block(h, cp);
2595
2596 if ((cp->cmd_type == CMD_IOACCEL2) &&
2597 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2598 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2599
2600 cmd->result = (DID_OK << 16);
2601 cmd->result |= (COMMAND_COMPLETE << 8);
2602
2603
2604 if (dev->was_removed) {
2605 hpsa_cmd_resolve_and_free(h, cp);
2606 return;
2607 }
2608
2609 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2610 if (dev->physical_device && dev->expose_device &&
2611 dev->removed) {
2612 cmd->result = DID_NO_CONNECT << 16;
2613 return hpsa_cmd_free_and_done(h, cp, cmd);
2614 }
2615 if (likely(cp->phys_disk != NULL))
2616 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2617 }
2618
2619	/*
2620	 * Check for controller lockup status here: it may be set for
2621	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by the
2622	 * controller lockup handler.
2623	 */
2624 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2625
2626 cmd->result = DID_NO_CONNECT << 16;
2627 return hpsa_cmd_free_and_done(h, cp, cmd);
2628 }
2629
2630 if (cp->cmd_type == CMD_IOACCEL2)
2631 return process_ioaccel2_completion(h, cp, cmd, dev);
2632
2633 scsi_set_resid(cmd, ei->ResidualCnt);
2634 if (ei->CommandStatus == 0)
2635 return hpsa_cmd_free_and_done(h, cp, cmd);
2636
2637	/* For I/O accelerator commands, copy over some fields to the normal
2638	 * CISS header used below for error handling.
2639	 */
2640 if (cp->cmd_type == CMD_IOACCEL1) {
2641 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2642 cp->Header.SGList = scsi_sg_count(cmd);
2643 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2644 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2645 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2646 cp->Header.tag = c->tag;
2647 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2648 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2649
2650		/* Any RAID offload error results in retry which will use
2651		 * the normal I/O path so the controller can handle whatever's
2652		 * wrong.
2653		 */
2654 if (is_logical_device(dev)) {
2655 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2656 dev->offload_enabled = 0;
2657 return hpsa_retry_cmd(h, cp);
2658 }
2659 }
2660
2661
2662 switch (ei->CommandStatus) {
2663
2664 case CMD_TARGET_STATUS:
2665 cmd->result |= ei->ScsiStatus;
2666
2667 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2668 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2669 else
2670 sense_data_size = sizeof(ei->SenseInfo);
2671 if (ei->SenseLen < sense_data_size)
2672 sense_data_size = ei->SenseLen;
2673 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2674 if (ei->ScsiStatus)
2675 decode_sense_data(ei->SenseInfo, sense_data_size,
2676 &sense_key, &asc, &ascq);
2677 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2678 switch (sense_key) {
2679 case ABORTED_COMMAND:
2680 cmd->result |= DID_SOFT_ERROR << 16;
2681 break;
2682 case UNIT_ATTENTION:
2683 if (asc == 0x3F && ascq == 0x0E)
2684 h->drv_req_rescan = 1;
2685 break;
2686 case ILLEGAL_REQUEST:
2687 if (asc == 0x25 && ascq == 0x00) {
2688 dev->removed = 1;
2689 cmd->result = DID_NO_CONNECT << 16;
2690 }
2691 break;
2692 }
2693 break;
2694 }
2695		/* Problem was not a check condition;
2696		 * pass it up to the upper layers...
2697		 */
2698 if (ei->ScsiStatus) {
2699 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2700 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2701 "Returning result: 0x%x\n",
2702 cp, ei->ScsiStatus,
2703 sense_key, asc, ascq,
2704 cmd->result);
2705 } else {
2706 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2707				"Returning no connection.\n", cp);
2708
2709			/* Ordinarily, this case should never happen,
2710			 * but there is a bug in some released firmware
2711			 * revisions that allows it to happen if, for
2712			 * example, a 4100 backplane loses power and
2713			 * the tape drive is in it.  We assume that
2714			 * it's a fatal error of some kind because we
2715			 * can't show that it wasn't.  We will make it
2716			 * look like selection timeout since that is
2717			 * the most common reason for this to occur,
2718			 * and it's severe enough.
2719			 */
2720
2721 cmd->result = DID_NO_CONNECT << 16;
2722 }
2723 break;
2724
2725 case CMD_DATA_UNDERRUN:
2726 break;
2727 case CMD_DATA_OVERRUN:
2728 dev_warn(&h->pdev->dev,
2729 "CDB %16phN data overrun\n", cp->Request.CDB);
2730 break;
2731 case CMD_INVALID: {
2732		/* We get CMD_INVALID if you address a non-existent
2733		 * device instead of a selection timeout (no response).
2734		 * You will see this if you yank out a drive, then try
2735		 * to access it.  This is kind of a shame because it
2736		 * means that any other CMD_INVALID (e.g. driver bug)
2737		 * will get interpreted as a missing target.
2738		 */
2739
2740 cmd->result = DID_NO_CONNECT << 16;
2741 }
2742 break;
2743 case CMD_PROTOCOL_ERR:
2744 cmd->result = DID_ERROR << 16;
2745 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2746 cp->Request.CDB);
2747 break;
2748 case CMD_HARDWARE_ERR:
2749 cmd->result = DID_ERROR << 16;
2750 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2751 cp->Request.CDB);
2752 break;
2753 case CMD_CONNECTION_LOST:
2754 cmd->result = DID_ERROR << 16;
2755 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2756 cp->Request.CDB);
2757 break;
2758 case CMD_ABORTED:
2759 cmd->result = DID_ABORT << 16;
2760 break;
2761 case CMD_ABORT_FAILED:
2762 cmd->result = DID_ERROR << 16;
2763 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2764 cp->Request.CDB);
2765 break;
2766 case CMD_UNSOLICITED_ABORT:
2767 cmd->result = DID_SOFT_ERROR << 16;
2768 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2769 cp->Request.CDB);
2770 break;
2771 case CMD_TIMEOUT:
2772 cmd->result = DID_TIME_OUT << 16;
2773 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2774 cp->Request.CDB);
2775 break;
2776 case CMD_UNABORTABLE:
2777 cmd->result = DID_ERROR << 16;
2778 dev_warn(&h->pdev->dev, "Command unabortable\n");
2779 break;
2780 case CMD_TMF_STATUS:
2781 if (hpsa_evaluate_tmf_status(h, cp))
2782 cmd->result = DID_ERROR << 16;
2783 break;
2784 case CMD_IOACCEL_DISABLED:
2785		/* This only handles the direct pass-through case since RAID
2786		 * offload is handled above.  Just attempt a retry.
2787		 */
2788 cmd->result = DID_SOFT_ERROR << 16;
2789 dev_warn(&h->pdev->dev,
2790 "cp %p had HP SSD Smart Path error\n", cp);
2791 break;
2792 default:
2793 cmd->result = DID_ERROR << 16;
2794 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2795 cp, ei->CommandStatus);
2796 }
2797
2798 return hpsa_cmd_free_and_done(h, cp, cmd);
2799}
2800
2801static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2802 int sg_used, enum dma_data_direction data_direction)
2803{
2804 int i;
2805
2806 for (i = 0; i < sg_used; i++)
2807 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2808 le32_to_cpu(c->SG[i].Len),
2809 data_direction);
2810}
2811
2812static int hpsa_map_one(struct pci_dev *pdev,
2813 struct CommandList *cp,
2814 unsigned char *buf,
2815 size_t buflen,
2816 enum dma_data_direction data_direction)
2817{
2818 u64 addr64;
2819
2820 if (buflen == 0 || data_direction == DMA_NONE) {
2821 cp->Header.SGList = 0;
2822 cp->Header.SGTotal = cpu_to_le16(0);
2823 return 0;
2824 }
2825
2826 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2827 if (dma_mapping_error(&pdev->dev, addr64)) {
2828
2829 cp->Header.SGList = 0;
2830 cp->Header.SGTotal = cpu_to_le16(0);
2831 return -1;
2832 }
2833 cp->SG[0].Addr = cpu_to_le64(addr64);
2834 cp->SG[0].Len = cpu_to_le32(buflen);
2835 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
2836 cp->Header.SGList = 1;
2837 cp->Header.SGTotal = cpu_to_le16(1);
2838 return 0;
2839}
2840
2841#define NO_TIMEOUT ((unsigned long) -1)
2842#define DEFAULT_TIMEOUT 30000
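/*
 * Submit a command and sleep until it completes.  With NO_TIMEOUT the wait
 * is unbounded; otherwise -ETIMEDOUT is returned if the command does not
 * finish within timeout_msecs.
 */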
2843static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2844 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2845{
2846 DECLARE_COMPLETION_ONSTACK(wait);
2847
2848 c->waiting = &wait;
2849 __enqueue_cmd_and_start_io(h, c, reply_queue);
2850 if (timeout_msecs == NO_TIMEOUT) {
2851
2852 wait_for_completion_io(&wait);
2853 return IO_OK;
2854 }
2855 if (!wait_for_completion_io_timeout(&wait,
2856 msecs_to_jiffies(timeout_msecs))) {
2857 dev_warn(&h->pdev->dev, "Command timed out.\n");
2858 return -ETIMEDOUT;
2859 }
2860 return IO_OK;
2861}
2862
2863static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2864 int reply_queue, unsigned long timeout_msecs)
2865{
2866 if (unlikely(lockup_detected(h))) {
2867 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2868 return IO_OK;
2869 }
2870 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2871}
2872
2873static u32 lockup_detected(struct ctlr_info *h)
2874{
2875 int cpu;
2876 u32 rc, *lockup_detected;
2877
2878 cpu = get_cpu();
2879 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2880 rc = *lockup_detected;
2881 put_cpu();
2882 return rc;
2883}
2884
2885#define MAX_DRIVER_CMD_RETRIES 25
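/*
 * Issue a command and retry it while the target reports unit attention or
 * busy, up to MAX_DRIVER_CMD_RETRIES, backing off with a doubling delay
 * (capped at one second) after the first few attempts.
 */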
2886static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2887 struct CommandList *c, enum dma_data_direction data_direction,
2888 unsigned long timeout_msecs)
2889{
2890 int backoff_time = 10, retry_count = 0;
2891 int rc;
2892
2893 do {
2894 memset(c->err_info, 0, sizeof(*c->err_info));
2895 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2896 timeout_msecs);
2897 if (rc)
2898 break;
2899 retry_count++;
2900 if (retry_count > 3) {
2901 msleep(backoff_time);
2902 if (backoff_time < 1000)
2903 backoff_time *= 2;
2904 }
2905 } while ((check_for_unit_attention(h, c) ||
2906 check_for_busy(h, c)) &&
2907 retry_count <= MAX_DRIVER_CMD_RETRIES);
2908 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2909 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2910 rc = -EIO;
2911 return rc;
2912}
2913
2914static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2915 struct CommandList *c)
2916{
2917 const u8 *cdb = c->Request.CDB;
2918 const u8 *lun = c->Header.LUN.LunAddrBytes;
2919
2920 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2921 txt, lun, cdb);
2922}
2923
2924static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2925 struct CommandList *cp)
2926{
2927 const struct ErrorInfo *ei = cp->err_info;
2928 struct device *d = &cp->h->pdev->dev;
2929 u8 sense_key, asc, ascq;
2930 int sense_len;
2931
2932 switch (ei->CommandStatus) {
2933 case CMD_TARGET_STATUS:
2934 if (ei->SenseLen > sizeof(ei->SenseInfo))
2935 sense_len = sizeof(ei->SenseInfo);
2936 else
2937 sense_len = ei->SenseLen;
2938 decode_sense_data(ei->SenseInfo, sense_len,
2939 &sense_key, &asc, &ascq);
2940 hpsa_print_cmd(h, "SCSI status", cp);
2941 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2942 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2943 sense_key, asc, ascq);
2944 else
2945 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2946 if (ei->ScsiStatus == 0)
2947 dev_warn(d, "SCSI status is abnormally zero. "
2948 "(probably indicates selection timeout "
2949 "reported incorrectly due to a known "
2950 "firmware bug, circa July, 2001.)\n");
2951 break;
2952 case CMD_DATA_UNDERRUN:
2953 break;
2954 case CMD_DATA_OVERRUN:
2955 hpsa_print_cmd(h, "overrun condition", cp);
2956 break;
2957 case CMD_INVALID: {
2958		/* The controller unfortunately reports SCSI passthru's
2959		 * to non-existent targets as invalid commands.
2960		 */
2961 hpsa_print_cmd(h, "invalid command", cp);
2962 dev_warn(d, "probably means device no longer present\n");
2963 }
2964 break;
2965 case CMD_PROTOCOL_ERR:
2966 hpsa_print_cmd(h, "protocol error", cp);
2967 break;
2968 case CMD_HARDWARE_ERR:
2969 hpsa_print_cmd(h, "hardware error", cp);
2970 break;
2971 case CMD_CONNECTION_LOST:
2972 hpsa_print_cmd(h, "connection lost", cp);
2973 break;
2974 case CMD_ABORTED:
2975 hpsa_print_cmd(h, "aborted", cp);
2976 break;
2977 case CMD_ABORT_FAILED:
2978 hpsa_print_cmd(h, "abort failed", cp);
2979 break;
2980 case CMD_UNSOLICITED_ABORT:
2981 hpsa_print_cmd(h, "unsolicited abort", cp);
2982 break;
2983 case CMD_TIMEOUT:
2984 hpsa_print_cmd(h, "timed out", cp);
2985 break;
2986 case CMD_UNABORTABLE:
2987 hpsa_print_cmd(h, "unabortable", cp);
2988 break;
2989 case CMD_CTLR_LOCKUP:
2990 hpsa_print_cmd(h, "controller lockup detected", cp);
2991 break;
2992 default:
2993 hpsa_print_cmd(h, "unknown status", cp);
2994 dev_warn(d, "Unknown command status %x\n",
2995 ei->CommandStatus);
2996 }
2997}
2998
2999static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
3000 u8 page, u8 *buf, size_t bufsize)
3001{
3002 int rc = IO_OK;
3003 struct CommandList *c;
3004 struct ErrorInfo *ei;
3005
3006 c = cmd_alloc(h);
3007 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
3008 page, scsi3addr, TYPE_CMD)) {
3009 rc = -1;
3010 goto out;
3011 }
3012 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3013 NO_TIMEOUT);
3014 if (rc)
3015 goto out;
3016 ei = c->err_info;
3017 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3018 hpsa_scsi_interpret_error(h, c);
3019 rc = -1;
3020 }
3021out:
3022 cmd_free(h, c);
3023 return rc;
3024}
3025
3026static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3027 u8 *scsi3addr)
3028{
3029 u8 *buf;
3030 u64 sa = 0;
3031 int rc = 0;
3032
3033 buf = kzalloc(1024, GFP_KERNEL);
3034 if (!buf)
3035 return 0;
3036
3037 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3038 buf, 1024);
3039
3040 if (rc)
3041 goto out;
3042
3043 sa = get_unaligned_be64(buf+12);
3044
3045out:
3046 kfree(buf);
3047 return sa;
3048}
3049
3050static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3051 u16 page, unsigned char *buf,
3052 unsigned char bufsize)
3053{
3054 int rc = IO_OK;
3055 struct CommandList *c;
3056 struct ErrorInfo *ei;
3057
3058 c = cmd_alloc(h);
3059
3060 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3061 page, scsi3addr, TYPE_CMD)) {
3062 rc = -1;
3063 goto out;
3064 }
3065 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3066 NO_TIMEOUT);
3067 if (rc)
3068 goto out;
3069 ei = c->err_info;
3070 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3071 hpsa_scsi_interpret_error(h, c);
3072 rc = -1;
3073 }
3074out:
3075 cmd_free(h, c);
3076 return rc;
3077}
3078
3079static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3080 u8 reset_type, int reply_queue)
3081{
3082 int rc = IO_OK;
3083 struct CommandList *c;
3084 struct ErrorInfo *ei;
3085
3086 c = cmd_alloc(h);
3087 c->device = dev;
3088
3089
3090 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3091 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3092 if (rc) {
3093 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3094 goto out;
3095 }
3096
3097
3098 ei = c->err_info;
3099 if (ei->CommandStatus != 0) {
3100 hpsa_scsi_interpret_error(h, c);
3101 rc = -1;
3102 }
3103out:
3104 cmd_free(h, c);
3105 return rc;
3106}
3107
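/*
 * Decide whether an outstanding command is bound to the given device:
 * RAID-path and ioctl commands are matched by SCSI address, ioaccel
 * commands by the backing physical disk, and TMF requests by IT nexus.
 */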
3108static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3109 struct hpsa_scsi_dev_t *dev,
3110 unsigned char *scsi3addr)
3111{
3112 int i;
3113 bool match = false;
3114 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3115 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3116
3117 if (hpsa_is_cmd_idle(c))
3118 return false;
3119
3120 switch (c->cmd_type) {
3121 case CMD_SCSI:
3122 case CMD_IOCTL_PEND:
3123 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3124 sizeof(c->Header.LUN.LunAddrBytes));
3125 break;
3126
3127 case CMD_IOACCEL1:
3128 case CMD_IOACCEL2:
3129 if (c->phys_disk == dev) {
3130
3131 match = true;
3132 } else {
3133			/*
3134			 * Possible RAID path command -- check each physical
3135			 * disk backing this logical volume.
3136			 */
3137 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3138
3139
3140
3141
3142 match = dev->phys_disk[i] == c->phys_disk;
3143 }
3144 }
3145 break;
3146
3147 case IOACCEL2_TMF:
3148 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3149 match = dev->phys_disk[i]->ioaccel_handle ==
3150 le32_to_cpu(ac->it_nexus);
3151 }
3152 break;
3153
3154 case 0:
3155 match = false;
3156 break;
3157
3158 default:
3159 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3160 c->cmd_type);
3161 BUG();
3162 }
3163
3164 return match;
3165}
3166
3167static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3168 u8 reset_type, int reply_queue)
3169{
3170 int rc = 0;
3171
3172
3173 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3174 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3175 return -EINTR;
3176 }
3177
3178 rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3179 if (!rc) {
3180
3181 atomic_dec(&dev->commands_outstanding);
3182 wait_event(h->event_sync_wait_queue,
3183 atomic_read(&dev->commands_outstanding) <= 0 ||
3184 lockup_detected(h));
3185 }
3186
3187 if (unlikely(lockup_detected(h))) {
3188 dev_warn(&h->pdev->dev,
3189 "Controller lockup detected during reset wait\n");
3190 rc = -ENODEV;
3191 }
3192
3193 if (!rc)
3194 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3195
3196 mutex_unlock(&h->reset_mutex);
3197 return rc;
3198}
3199
3200static void hpsa_get_raid_level(struct ctlr_info *h,
3201 unsigned char *scsi3addr, unsigned char *raid_level)
3202{
3203 int rc;
3204 unsigned char *buf;
3205
3206 *raid_level = RAID_UNKNOWN;
3207 buf = kzalloc(64, GFP_KERNEL);
3208 if (!buf)
3209 return;
3210
3211 if (!hpsa_vpd_page_supported(h, scsi3addr,
3212 HPSA_VPD_LV_DEVICE_GEOMETRY))
3213 goto exit;
3214
3215 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3216 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3217
3218 if (rc == 0)
3219 *raid_level = buf[8];
3220 if (*raid_level > RAID_UNKNOWN)
3221 *raid_level = RAID_UNKNOWN;
3222exit:
3223 kfree(buf);
3224 return;
3225}
3226
3227#define HPSA_MAP_DEBUG
3228#ifdef HPSA_MAP_DEBUG
3229static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3230 struct raid_map_data *map_buff)
3231{
3232 struct raid_map_disk_data *dd = &map_buff->data[0];
3233 int map, row, col;
3234 u16 map_cnt, row_cnt, disks_per_row;
3235
3236 if (rc != 0)
3237 return;
3238
3239
3240 if (h->raid_offload_debug < 2)
3241 return;
3242
3243 dev_info(&h->pdev->dev, "structure_size = %u\n",
3244 le32_to_cpu(map_buff->structure_size));
3245 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3246 le32_to_cpu(map_buff->volume_blk_size));
3247 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3248 le64_to_cpu(map_buff->volume_blk_cnt));
3249 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3250 map_buff->phys_blk_shift);
3251 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3252 map_buff->parity_rotation_shift);
3253 dev_info(&h->pdev->dev, "strip_size = %u\n",
3254 le16_to_cpu(map_buff->strip_size));
3255 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3256 le64_to_cpu(map_buff->disk_starting_blk));
3257 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3258 le64_to_cpu(map_buff->disk_blk_cnt));
3259 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3260 le16_to_cpu(map_buff->data_disks_per_row));
3261 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3262 le16_to_cpu(map_buff->metadata_disks_per_row));
3263 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3264 le16_to_cpu(map_buff->row_cnt));
3265 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3266 le16_to_cpu(map_buff->layout_map_count));
3267 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3268 le16_to_cpu(map_buff->flags));
3269 dev_info(&h->pdev->dev, "encryption = %s\n",
3270 le16_to_cpu(map_buff->flags) &
3271 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3272 dev_info(&h->pdev->dev, "dekindex = %u\n",
3273 le16_to_cpu(map_buff->dekindex));
3274 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3275 for (map = 0; map < map_cnt; map++) {
3276 dev_info(&h->pdev->dev, "Map%u:\n", map);
3277 row_cnt = le16_to_cpu(map_buff->row_cnt);
3278 for (row = 0; row < row_cnt; row++) {
3279 dev_info(&h->pdev->dev, " Row%u:\n", row);
3280 disks_per_row =
3281 le16_to_cpu(map_buff->data_disks_per_row);
3282 for (col = 0; col < disks_per_row; col++, dd++)
3283 dev_info(&h->pdev->dev,
3284 " D%02u: h=0x%04x xor=%u,%u\n",
3285 col, dd->ioaccel_handle,
3286 dd->xor_mult[0], dd->xor_mult[1]);
3287 disks_per_row =
3288 le16_to_cpu(map_buff->metadata_disks_per_row);
3289 for (col = 0; col < disks_per_row; col++, dd++)
3290 dev_info(&h->pdev->dev,
3291 " M%02u: h=0x%04x xor=%u,%u\n",
3292 col, dd->ioaccel_handle,
3293 dd->xor_mult[0], dd->xor_mult[1]);
3294 }
3295 }
3296}
3297#else
3298static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3299 __attribute__((unused)) int rc,
3300 __attribute__((unused)) struct raid_map_data *map_buff)
3301{
3302}
3303#endif
3304
3305static int hpsa_get_raid_map(struct ctlr_info *h,
3306 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3307{
3308 int rc = 0;
3309 struct CommandList *c;
3310 struct ErrorInfo *ei;
3311
3312 c = cmd_alloc(h);
3313
3314 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3315 sizeof(this_device->raid_map), 0,
3316 scsi3addr, TYPE_CMD)) {
3317 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3318 cmd_free(h, c);
3319 return -1;
3320 }
3321 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3322 NO_TIMEOUT);
3323 if (rc)
3324 goto out;
3325 ei = c->err_info;
3326 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3327 hpsa_scsi_interpret_error(h, c);
3328 rc = -1;
3329 goto out;
3330 }
3331 cmd_free(h, c);
3332
3333
3334 if (le32_to_cpu(this_device->raid_map.structure_size) >
3335 sizeof(this_device->raid_map)) {
3336 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3337 rc = -1;
3338 }
3339 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3340 return rc;
3341out:
3342 cmd_free(h, c);
3343 return rc;
3344}
3345
3346static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3347 unsigned char scsi3addr[], u16 bmic_device_index,
3348 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3349{
3350 int rc = IO_OK;
3351 struct CommandList *c;
3352 struct ErrorInfo *ei;
3353
3354 c = cmd_alloc(h);
3355
3356 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3357 0, RAID_CTLR_LUNID, TYPE_CMD);
3358 if (rc)
3359 goto out;
3360
3361 c->Request.CDB[2] = bmic_device_index & 0xff;
3362 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3363
3364 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3365 NO_TIMEOUT);
3366 if (rc)
3367 goto out;
3368 ei = c->err_info;
3369 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3370 hpsa_scsi_interpret_error(h, c);
3371 rc = -1;
3372 }
3373out:
3374 cmd_free(h, c);
3375 return rc;
3376}
3377
3378static int hpsa_bmic_id_controller(struct ctlr_info *h,
3379 struct bmic_identify_controller *buf, size_t bufsize)
3380{
3381 int rc = IO_OK;
3382 struct CommandList *c;
3383 struct ErrorInfo *ei;
3384
3385 c = cmd_alloc(h);
3386
3387 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3388 0, RAID_CTLR_LUNID, TYPE_CMD);
3389 if (rc)
3390 goto out;
3391
3392 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3393 NO_TIMEOUT);
3394 if (rc)
3395 goto out;
3396 ei = c->err_info;
3397 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3398 hpsa_scsi_interpret_error(h, c);
3399 rc = -1;
3400 }
3401out:
3402 cmd_free(h, c);
3403 return rc;
3404}
3405
3406static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3407 unsigned char scsi3addr[], u16 bmic_device_index,
3408 struct bmic_identify_physical_device *buf, size_t bufsize)
3409{
3410 int rc = IO_OK;
3411 struct CommandList *c;
3412 struct ErrorInfo *ei;
3413
3414 c = cmd_alloc(h);
3415 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3416 0, RAID_CTLR_LUNID, TYPE_CMD);
3417 if (rc)
3418 goto out;
3419
3420 c->Request.CDB[2] = bmic_device_index & 0xff;
3421 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3422
3423	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3424						NO_TIMEOUT);
3425 ei = c->err_info;
3426 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3427 hpsa_scsi_interpret_error(h, c);
3428 rc = -1;
3429 }
3430out:
3431 cmd_free(h, c);
3432
3433 return rc;
3434}
3435
3436
3437/*
3438 * Get enclosure information:
3439 *   rlep - used to derive the BMIC drive number
3440 *   encl_dev - device entry for the enclosure to fill in
3441 */
3442static void hpsa_get_enclosure_info(struct ctlr_info *h,
3443 unsigned char *scsi3addr,
3444 struct ReportExtendedLUNdata *rlep, int rle_index,
3445 struct hpsa_scsi_dev_t *encl_dev)
3446{
3447 int rc = -1;
3448 struct CommandList *c = NULL;
3449 struct ErrorInfo *ei = NULL;
3450 struct bmic_sense_storage_box_params *bssbp = NULL;
3451 struct bmic_identify_physical_device *id_phys = NULL;
3452 struct ext_report_lun_entry *rle;
3453 u16 bmic_device_index = 0;
3454
3455 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
3456 return;
3457
3458 rle = &rlep->LUN[rle_index];
3459
3460 encl_dev->eli =
3461 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3462
3463 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3464
3465 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3466 rc = IO_OK;
3467 goto out;
3468 }
3469
3470 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3471 rc = IO_OK;
3472 goto out;
3473 }
3474
3475 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3476 if (!bssbp)
3477 goto out;
3478
3479 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3480 if (!id_phys)
3481 goto out;
3482
3483 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3484 id_phys, sizeof(*id_phys));
3485 if (rc) {
3486 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3487 __func__, encl_dev->external, bmic_device_index);
3488 goto out;
3489 }
3490
3491 c = cmd_alloc(h);
3492
3493 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3494 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3495
3496 if (rc)
3497 goto out;
3498
3499 if (id_phys->phys_connector[1] == 'E')
3500 c->Request.CDB[5] = id_phys->box_index;
3501 else
3502 c->Request.CDB[5] = 0;
3503
3504 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3505 NO_TIMEOUT);
3506 if (rc)
3507 goto out;
3508
3509 ei = c->err_info;
3510 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3511 rc = -1;
3512 goto out;
3513 }
3514
3515 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3516 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3517 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3518
3519 rc = IO_OK;
3520out:
3521 kfree(bssbp);
3522 kfree(id_phys);
3523
3524 if (c)
3525 cmd_free(h, c);
3526
3527 if (rc != IO_OK)
3528 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3529 "Error, could not get enclosure information");
3530}
3531
3532static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3533 unsigned char *scsi3addr)
3534{
3535 struct ReportExtendedLUNdata *physdev;
3536 u32 nphysicals;
3537 u64 sa = 0;
3538 int i;
3539
3540 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3541 if (!physdev)
3542 return 0;
3543
3544 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3545 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3546 kfree(physdev);
3547 return 0;
3548 }
3549 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3550
3551 for (i = 0; i < nphysicals; i++)
3552 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3553 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3554 break;
3555 }
3556
3557 kfree(physdev);
3558
3559 return sa;
3560}
3561
3562static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3563 struct hpsa_scsi_dev_t *dev)
3564{
3565 int rc;
3566 u64 sa = 0;
3567
3568 if (is_hba_lunid(scsi3addr)) {
3569 struct bmic_sense_subsystem_info *ssi;
3570
3571 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3572 if (!ssi)
3573 return;
3574
3575 rc = hpsa_bmic_sense_subsystem_information(h,
3576 scsi3addr, 0, ssi, sizeof(*ssi));
3577 if (rc == 0) {
3578 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3579 h->sas_address = sa;
3580 }
3581
3582 kfree(ssi);
3583 } else
3584 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3585
3586 dev->sas_address = sa;
3587}
3588
3589static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3590 struct ReportExtendedLUNdata *physdev)
3591{
3592 u32 nphysicals;
3593 int i;
3594
3595 if (h->discovery_polling)
3596 return;
3597
3598 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3599
3600 for (i = 0; i < nphysicals; i++) {
3601 if (physdev->LUN[i].device_type ==
3602 BMIC_DEVICE_TYPE_CONTROLLER
3603 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3604 dev_info(&h->pdev->dev,
3605 "External controller present, activate discovery polling and disable rld caching\n");
3606 hpsa_disable_rld_caching(h);
3607 h->discovery_polling = 1;
3608 break;
3609 }
3610 }
3611}
3612
3613
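/*
 * Check whether a device supports a given VPD page by reading the list of
 * supported pages (VPD page 0x00) and scanning it for the requested page.
 */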
3614static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3615 unsigned char scsi3addr[], u8 page)
3616{
3617 int rc;
3618 int i;
3619 int pages;
3620 unsigned char *buf, bufsize;
3621
3622 buf = kzalloc(256, GFP_KERNEL);
3623 if (!buf)
3624 return false;
3625
3626
3627 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3628 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3629 buf, HPSA_VPD_HEADER_SZ);
3630 if (rc != 0)
3631 goto exit_unsupported;
3632 pages = buf[3];
3633 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3634 bufsize = pages + HPSA_VPD_HEADER_SZ;
3635 else
3636 bufsize = 255;
3637
3638
3639 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3640 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3641 buf, bufsize);
3642 if (rc != 0)
3643 goto exit_unsupported;
3644
3645 pages = buf[3];
3646 for (i = 1; i <= pages; i++)
3647 if (buf[3 + i] == page)
3648 goto exit_supported;
3649exit_unsupported:
3650 kfree(buf);
3651 return false;
3652exit_supported:
3653 kfree(buf);
3654 return true;
3655}
3656
3657/*
3658 * Called during a scan operation.
3659 * Sets ioaccel status on the new device list, not the existing device list.
3660 *
3661 * The device list used during I/O will be updated later in
3662 * adjust_hpsa_scsi_table.
3663 */
3664static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3665 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3666{
3667 int rc;
3668 unsigned char *buf;
3669 u8 ioaccel_status;
3670
3671 this_device->offload_config = 0;
3672 this_device->offload_enabled = 0;
3673 this_device->offload_to_be_enabled = 0;
3674
3675 buf = kzalloc(64, GFP_KERNEL);
3676 if (!buf)
3677 return;
3678 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3679 goto out;
3680 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3681 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3682 if (rc != 0)
3683 goto out;
3684
3685#define IOACCEL_STATUS_BYTE 4
3686#define OFFLOAD_CONFIGURED_BIT 0x01
3687#define OFFLOAD_ENABLED_BIT 0x02
3688 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3689 this_device->offload_config =
3690 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3691 if (this_device->offload_config) {
3692 bool offload_enabled =
3693 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3694
3695
3696
3697 if (offload_enabled) {
3698 rc = hpsa_get_raid_map(h, scsi3addr, this_device);
3699 if (rc)
3700 goto out;
3701 this_device->offload_to_be_enabled = 1;
3702 }
3703 }
3704
3705out:
3706 kfree(buf);
3707 return;
3708}
3709
3710
3711static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3712 unsigned char *device_id, int index, int buflen)
3713{
3714 int rc;
3715 unsigned char *buf;
3716
3717
3718 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3719 return 1;
3720
3721 buf = kzalloc(64, GFP_KERNEL);
3722 if (!buf)
3723 return -ENOMEM;
3724
3725 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3726 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3727 if (rc == 0) {
3728 if (buflen > 16)
3729 buflen = 16;
3730 memcpy(device_id, &buf[8], buflen);
3731 }
3732
3733 kfree(buf);
3734
3735 return rc;
3736}
3737
3738static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3739 void *buf, int bufsize,
3740 int extended_response)
3741{
3742 int rc = IO_OK;
3743 struct CommandList *c;
3744 unsigned char scsi3addr[8];
3745 struct ErrorInfo *ei;
3746
3747 c = cmd_alloc(h);
3748
3749
3750 memset(scsi3addr, 0, sizeof(scsi3addr));
3751 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3752 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3753 rc = -EAGAIN;
3754 goto out;
3755 }
3756 if (extended_response)
3757 c->Request.CDB[1] = extended_response;
3758 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3759 NO_TIMEOUT);
3760 if (rc)
3761 goto out;
3762 ei = c->err_info;
3763 if (ei->CommandStatus != 0 &&
3764 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3765 hpsa_scsi_interpret_error(h, c);
3766 rc = -EIO;
3767 } else {
3768 struct ReportLUNdata *rld = buf;
3769
3770 if (rld->extended_response_flag != extended_response) {
3771 if (!h->legacy_board) {
3772 dev_err(&h->pdev->dev,
3773 "report luns requested format %u, got %u\n",
3774 extended_response,
3775 rld->extended_response_flag);
3776 rc = -EINVAL;
3777 } else
3778 rc = -EOPNOTSUPP;
3779 }
3780 }
3781out:
3782 cmd_free(h, c);
3783 return rc;
3784}
3785
3786static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3787 struct ReportExtendedLUNdata *buf, int bufsize)
3788{
3789 int rc;
3790 struct ReportLUNdata *lbuf;
3791
3792 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3793 HPSA_REPORT_PHYS_EXTENDED);
3794 if (!rc || rc != -EOPNOTSUPP)
3795 return rc;
3796
3797
3798 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3799 if (!lbuf)
3800 return -ENOMEM;
3801
3802 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3803 if (!rc) {
3804 int i;
3805 u32 nphys;
3806
3807
3808 memcpy(buf, lbuf, 8);
3809 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3810 for (i = 0; i < nphys; i++)
3811 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3812 }
3813 kfree(lbuf);
3814 return rc;
3815}
3816
3817static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3818 struct ReportLUNdata *buf, int bufsize)
3819{
3820 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3821}
3822
3823static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3824 int bus, int target, int lun)
3825{
3826 device->bus = bus;
3827 device->target = target;
3828 device->lun = lun;
3829}
3830
3831
3832static int hpsa_get_volume_status(struct ctlr_info *h,
3833 unsigned char scsi3addr[])
3834{
3835 int rc;
3836 int status;
3837 int size;
3838 unsigned char *buf;
3839
3840 buf = kzalloc(64, GFP_KERNEL);
3841 if (!buf)
3842 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3843
3844
3845 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3846 goto exit_failed;
3847
3848
3849 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3850 buf, HPSA_VPD_HEADER_SZ);
3851 if (rc != 0)
3852 goto exit_failed;
3853 size = buf[3];
3854
3855
3856 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3857 buf, size + HPSA_VPD_HEADER_SZ);
3858 if (rc != 0)
3859 goto exit_failed;
3860 status = buf[4];
3861
3862 kfree(buf);
3863 return status;
3864exit_failed:
3865 kfree(buf);
3866 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3867}
3868
3869/* Determine offline status of a volume.
3870 * Return either:
3871 *  0 (not offline)
3872 *  0xff (offline for unknown reasons)
3873 *  # (integer code indicating one of several NOT READY states
3874 *     describing why a volume is to be kept offline)
3875 */
3876static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3877 unsigned char scsi3addr[])
3878{
3879 struct CommandList *c;
3880 unsigned char *sense;
3881 u8 sense_key, asc, ascq;
3882 int sense_len;
3883 int rc, ldstat = 0;
3884 u16 cmd_status;
3885 u8 scsi_status;
3886#define ASC_LUN_NOT_READY 0x04
3887#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3888#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3889
3890 c = cmd_alloc(h);
3891
3892 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3893 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3894 NO_TIMEOUT);
3895 if (rc) {
3896 cmd_free(h, c);
3897 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3898 }
3899 sense = c->err_info->SenseInfo;
3900 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3901 sense_len = sizeof(c->err_info->SenseInfo);
3902 else
3903 sense_len = c->err_info->SenseLen;
3904 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3905 cmd_status = c->err_info->CommandStatus;
3906 scsi_status = c->err_info->ScsiStatus;
3907 cmd_free(h, c);
3908
3909
3910 ldstat = hpsa_get_volume_status(h, scsi3addr);
3911
3912
3913 switch (ldstat) {
3914 case HPSA_LV_FAILED:
3915 case HPSA_LV_UNDERGOING_ERASE:
3916 case HPSA_LV_NOT_AVAILABLE:
3917 case HPSA_LV_UNDERGOING_RPI:
3918 case HPSA_LV_PENDING_RPI:
3919 case HPSA_LV_ENCRYPTED_NO_KEY:
3920 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3921 case HPSA_LV_UNDERGOING_ENCRYPTION:
3922 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3923 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3924 return ldstat;
3925 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3926
3927
3928
3929 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3930 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3931 return ldstat;
3932 break;
3933 default:
3934 break;
3935 }
3936 return HPSA_LV_OK;
3937}
3938
3939static int hpsa_update_device_info(struct ctlr_info *h,
3940 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3941 unsigned char *is_OBDR_device)
3942{
3943
3944#define OBDR_SIG_OFFSET 43
3945#define OBDR_TAPE_SIG "$DR-10"
3946#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3947#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3948
3949 unsigned char *inq_buff;
3950 unsigned char *obdr_sig;
3951 int rc = 0;
3952
3953 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3954 if (!inq_buff) {
3955 rc = -ENOMEM;
3956 goto bail_out;
3957 }
3958
3959
3960 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3961 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3962 dev_err(&h->pdev->dev,
3963 "%s: inquiry failed, device will be skipped.\n",
3964 __func__);
3965 rc = HPSA_INQUIRY_FAILED;
3966 goto bail_out;
3967 }
3968
3969 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3970 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3971
3972 this_device->devtype = (inq_buff[0] & 0x1f);
3973 memcpy(this_device->scsi3addr, scsi3addr, 8);
3974 memcpy(this_device->vendor, &inq_buff[8],
3975 sizeof(this_device->vendor));
3976 memcpy(this_device->model, &inq_buff[16],
3977 sizeof(this_device->model));
3978 this_device->rev = inq_buff[2];
3979 memset(this_device->device_id, 0,
3980 sizeof(this_device->device_id));
3981 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3982 sizeof(this_device->device_id)) < 0) {
3983 dev_err(&h->pdev->dev,
3984 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3985 h->ctlr, __func__,
3986 h->scsi_host->host_no,
3987 this_device->bus, this_device->target,
3988 this_device->lun,
3989 scsi_device_type(this_device->devtype),
3990 this_device->model);
3991 rc = HPSA_LV_FAILED;
3992 goto bail_out;
3993 }
3994
3995 if ((this_device->devtype == TYPE_DISK ||
3996 this_device->devtype == TYPE_ZBC) &&
3997 is_logical_dev_addr_mode(scsi3addr)) {
3998 unsigned char volume_offline;
3999
4000 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
4001 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
4002 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
4003 volume_offline = hpsa_volume_offline(h, scsi3addr);
4004 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
4005 h->legacy_board) {
4006
4007
4008
4009 dev_info(&h->pdev->dev,
4010 "C0:T%d:L%d Volume status not available, assuming online.\n",
4011 this_device->target, this_device->lun);
4012 volume_offline = 0;
4013 }
4014 this_device->volume_offline = volume_offline;
4015 if (volume_offline == HPSA_LV_FAILED) {
4016 rc = HPSA_LV_FAILED;
4017 dev_err(&h->pdev->dev,
4018 "%s: LV failed, device will be skipped.\n",
4019 __func__);
4020 goto bail_out;
4021 }
4022 } else {
4023 this_device->raid_level = RAID_UNKNOWN;
4024 this_device->offload_config = 0;
4025 hpsa_turn_off_ioaccel_for_device(this_device);
4026 this_device->hba_ioaccel_enabled = 0;
4027 this_device->volume_offline = 0;
4028 this_device->queue_depth = h->nr_cmds;
4029 }
4030
4031 if (this_device->external)
4032 this_device->queue_depth = EXTERNAL_QD;
4033
4034 if (is_OBDR_device) {
4035
4036
4037
4038 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4039 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4040 strncmp(obdr_sig, OBDR_TAPE_SIG,
4041 OBDR_SIG_LEN) == 0);
4042 }
4043 kfree(inq_buff);
4044 return 0;
4045
4046bail_out:
4047 kfree(inq_buff);
4048 return rc;
4049}
4050
4051/* Helper function to assign bus, target, lun mapping of devices.
4052 * Logical volumes, external RAID volumes, physical devices and the
4053 * HBA itself each get their own bus.  Logical drive target and lun
4054 * are assigned here; physical devices get -1 placeholders and are
4055 * assigned when they are added to h->dev[].
4056 */
4057static void figure_bus_target_lun(struct ctlr_info *h,
4058 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4059{
4060 u32 lunid = get_unaligned_le32(lunaddrbytes);
4061
4062 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4063
4064 if (is_hba_lunid(lunaddrbytes)) {
4065 int bus = HPSA_HBA_BUS;
4066
4067 if (!device->rev)
4068 bus = HPSA_LEGACY_HBA_BUS;
4069 hpsa_set_bus_target_lun(device,
4070 bus, 0, lunid & 0x3fff);
4071 } else
4072
4073 hpsa_set_bus_target_lun(device,
4074 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4075 return;
4076 }
4077
4078 if (device->external) {
4079 hpsa_set_bus_target_lun(device,
4080 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4081 lunid & 0x00ff);
4082 return;
4083 }
4084 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4085 0, lunid & 0x3fff);
4086}
4087
4088static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4089 int i, int nphysicals, int nlocal_logicals)
4090{
4091	/* In report logicals, local logicals are listed first,
4092	 * then any externals.
4093	 */
4094 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4095
4096 if (i == raid_ctlr_position)
4097 return 0;
4098
4099 if (i < logicals_start)
4100 return 0;
4101
4102
4103 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4104 return 0;
4105
4106 return 1;
4107}
4108
4109/*
4110 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev
4111 * and logdev.  The number of luns in physdev and logdev are returned in
4112 * nphysicals and nlogicals, respectively.
4113 * Returns 0 on success, -1 otherwise.
4114 */
4115static int hpsa_gather_lun_info(struct ctlr_info *h,
4116 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4117 struct ReportLUNdata *logdev, u32 *nlogicals)
4118{
4119 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4120 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4121 return -1;
4122 }
4123 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4124 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4125 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4126 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4127 *nphysicals = HPSA_MAX_PHYS_LUN;
4128 }
4129 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4130 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4131 return -1;
4132 }
4133 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4134
4135 if (*nlogicals > HPSA_MAX_LUN) {
4136 dev_warn(&h->pdev->dev,
4137 "maximum logical LUNs (%d) exceeded. "
4138 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4139 *nlogicals - HPSA_MAX_LUN);
4140 *nlogicals = HPSA_MAX_LUN;
4141 }
4142 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4143 dev_warn(&h->pdev->dev,
4144 "maximum logical + physical LUNs (%d) exceeded. "
4145 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4146 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4147 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4148 }
4149 return 0;
4150}
4151
4152static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4153 int i, int nphysicals, int nlogicals,
4154 struct ReportExtendedLUNdata *physdev_list,
4155 struct ReportLUNdata *logdev_list)
4156{
4157	/* Helper function, figure out where the LUN ID info is coming from
4158	 * given index i, lists of physical and logical devices, where in
4159	 * the list the raid controller is supposed to appear (first or last).
4160	 */
4161
4162 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4163 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4164
4165 if (i == raid_ctlr_position)
4166 return RAID_CTLR_LUNID;
4167
4168 if (i < logicals_start)
4169 return &physdev_list->LUN[i -
4170 (raid_ctlr_position == 0)].lunid[0];
4171
4172 if (i < last_device)
4173 return &logdev_list->LUN[i - nphysicals -
4174 (raid_ctlr_position == 0)][0];
4175 BUG();
4176 return NULL;
4177}
4178
4179
4180static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4181 struct hpsa_scsi_dev_t *dev,
4182 struct ReportExtendedLUNdata *rlep, int rle_index,
4183 struct bmic_identify_physical_device *id_phys)
4184{
4185 int rc;
4186 struct ext_report_lun_entry *rle;
4187
4188 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4189 return;
4190
4191 rle = &rlep->LUN[rle_index];
4192
4193 dev->ioaccel_handle = rle->ioaccel_handle;
4194 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4195 dev->hba_ioaccel_enabled = 1;
4196 memset(id_phys, 0, sizeof(*id_phys));
4197 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4198 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4199 sizeof(*id_phys));
4200 if (!rc)
4201		/* keep a few commands in reserve for firmware use */
4202#define DRIVE_CMDS_RESERVED_FOR_FW 2
4203#define DRIVE_QUEUE_DEPTH 7
4204 dev->queue_depth =
4205 le16_to_cpu(id_phys->current_queue_depth_limit) -
4206 DRIVE_CMDS_RESERVED_FOR_FW;
4207 else
4208 dev->queue_depth = DRIVE_QUEUE_DEPTH;
4209}
4210
4211static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4212 struct ReportExtendedLUNdata *rlep, int rle_index,
4213 struct bmic_identify_physical_device *id_phys)
4214{
4215 struct ext_report_lun_entry *rle;
4216
4217 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4218 return;
4219
4220 rle = &rlep->LUN[rle_index];
4221
4222 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4223 this_device->hba_ioaccel_enabled = 1;
4224
4225 memcpy(&this_device->active_path_index,
4226 &id_phys->active_path_number,
4227 sizeof(this_device->active_path_index));
4228 memcpy(&this_device->path_map,
4229 &id_phys->redundant_path_present_map,
4230 sizeof(this_device->path_map));
4231 memcpy(&this_device->box,
4232 &id_phys->alternate_paths_phys_box_on_port,
4233 sizeof(this_device->box));
4234 memcpy(&this_device->phys_connector,
4235 &id_phys->alternate_paths_phys_connector,
4236 sizeof(this_device->phys_connector));
4237 memcpy(&this_device->bay,
4238 &id_phys->phys_bay_in_box,
4239 sizeof(this_device->bay));
4240}
4241
4242
4243static int hpsa_set_local_logical_count(struct ctlr_info *h,
4244 struct bmic_identify_controller *id_ctlr,
4245 u32 *nlocals)
4246{
4247 int rc;
4248
4249 if (!id_ctlr) {
4250 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4251 __func__);
4252 return -ENOMEM;
4253 }
4254 memset(id_ctlr, 0, sizeof(*id_ctlr));
4255 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4256 if (!rc)
4257 if (id_ctlr->configured_logical_drive_count < 255)
4258 *nlocals = id_ctlr->configured_logical_drive_count;
4259 else
4260 *nlocals = le16_to_cpu(
4261 id_ctlr->extended_logical_unit_count);
4262 else
4263 *nlocals = -1;
4264 return rc;
4265}
4266
4267static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4268{
4269 struct bmic_identify_physical_device *id_phys;
4270 bool is_spare = false;
4271 int rc;
4272
4273 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4274 if (!id_phys)
4275 return false;
4276
4277 rc = hpsa_bmic_id_physical_device(h,
4278 lunaddrbytes,
4279 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4280 id_phys, sizeof(*id_phys));
4281 if (rc == 0)
4282 is_spare = (id_phys->more_flags >> 6) & 0x01;
4283
4284 kfree(id_phys);
4285 return is_spare;
4286}
4287
4288#define RPL_DEV_FLAG_NON_DISK 0x1
4289#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4290#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4291
4292#define BMIC_DEVICE_TYPE_ENCLOSURE 6
4293
4294static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4295 struct ext_report_lun_entry *rle)
4296{
4297 u8 device_flags;
4298 u8 device_type;
4299
4300 if (!MASKED_DEVICE(lunaddrbytes))
4301 return false;
4302
4303 device_flags = rle->device_flags;
4304 device_type = rle->device_type;
4305
4306 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4307 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4308 return false;
4309 return true;
4310 }
4311
4312 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4313 return false;
4314
4315 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4316 return false;
4317
4318	/*
4319	 * At this point the device is a masked, configured disk that
4320	 * supports unconfigured-disk reporting.  Skip it if it is a
4321	 * spare: spares may be spun down, and poking them with further
4322	 * commands would force a spin-up and stall I/O to the volumes
4323	 * they back.
4324	 */
4325
4326 if (hpsa_is_disk_spare(h, lunaddrbytes))
4327 return true;
4328
4329 return false;
4330}
4331
4332static void hpsa_update_scsi_devices(struct ctlr_info *h)
4333{
4334	/*
4335	 * The idea here is that we may be notified that devices have been
4336	 * added or removed, so we issue report-physical-LUNs and
4337	 * report-logical-LUNs commands and adjust our device list to match.
4338	 *
4339	 * A device's scsi3addr does not change while the adapter is not
4340	 * reset, so on rescan we can tell which devices we already know
4341	 * about vs. new or disappearing ones.
4342	 */
4343
4344 struct ReportExtendedLUNdata *physdev_list = NULL;
4345 struct ReportLUNdata *logdev_list = NULL;
4346 struct bmic_identify_physical_device *id_phys = NULL;
4347 struct bmic_identify_controller *id_ctlr = NULL;
4348 u32 nphysicals = 0;
4349 u32 nlogicals = 0;
4350 u32 nlocal_logicals = 0;
4351 u32 ndev_allocated = 0;
4352 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4353 int ncurrent = 0;
4354 int i, n_ext_target_devs, ndevs_to_allocate;
4355 int raid_ctlr_position;
4356 bool physical_device;
4357 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4358
4359 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4360 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4361 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4362 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4363 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4364 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4365
4366 if (!currentsd || !physdev_list || !logdev_list ||
4367 !tmpdevice || !id_phys || !id_ctlr) {
4368 dev_err(&h->pdev->dev, "out of memory\n");
4369 goto out;
4370 }
4371 memset(lunzerobits, 0, sizeof(lunzerobits));
4372
4373 h->drv_req_rescan = 0;
4374
4375 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4376 logdev_list, &nlogicals)) {
4377 h->drv_req_rescan = 1;
4378 goto out;
4379 }
4380
4381
4382 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4383 dev_warn(&h->pdev->dev,
4384 "%s: Can't determine number of local logical devices.\n",
4385 __func__);
4386 }
4387
4388	/*
4389	 * We may see up to nphysicals + nlogicals devices, plus external
4390	 * target devices, plus one entry for the RAID controller itself.
4391	 */
4392 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4393
4394 hpsa_ext_ctrl_present(h, physdev_list);
4395
4396
4397 for (i = 0; i < ndevs_to_allocate; i++) {
4398 if (i >= HPSA_MAX_DEVICES) {
4399 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4400 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4401 ndevs_to_allocate - HPSA_MAX_DEVICES);
4402 break;
4403 }
4404
4405 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4406 if (!currentsd[i]) {
4407 h->drv_req_rescan = 1;
4408 goto out;
4409 }
4410 ndev_allocated++;
4411 }
4412
4413 if (is_scsi_rev_5(h))
4414 raid_ctlr_position = 0;
4415 else
4416 raid_ctlr_position = nphysicals + nlogicals;
4417
4418
4419 n_ext_target_devs = 0;
4420 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4421 u8 *lunaddrbytes, is_OBDR = 0;
4422 int rc = 0;
4423 int phys_dev_index = i - (raid_ctlr_position == 0);
4424 bool skip_device = false;
4425
4426 memset(tmpdevice, 0, sizeof(*tmpdevice));
4427
4428 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4429
4430
4431 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4432 i, nphysicals, nlogicals, physdev_list, logdev_list);
4433
4434
4435 tmpdevice->external =
4436 figure_external_status(h, raid_ctlr_position, i,
4437 nphysicals, nlocal_logicals);
4438
4439
4440
4441
4442 if (phys_dev_index >= 0 && !tmpdevice->external &&
4443 physical_device) {
4444 skip_device = hpsa_skip_device(h, lunaddrbytes,
4445 &physdev_list->LUN[phys_dev_index]);
4446 if (skip_device)
4447 continue;
4448 }
4449
4450
4451 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4452 &is_OBDR);
4453 if (rc == -ENOMEM) {
4454 dev_warn(&h->pdev->dev,
4455 "Out of memory, rescan deferred.\n");
4456 h->drv_req_rescan = 1;
4457 goto out;
4458 }
4459 if (rc) {
4460 h->drv_req_rescan = 1;
4461 continue;
4462 }
4463
4464 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4465 this_device = currentsd[ncurrent];
4466
4467 *this_device = *tmpdevice;
4468 this_device->physical_device = physical_device;
4469
4470		/*
4471		 * Expose all devices except masked physical devices, which
4472		 * are tracked internally but not handed to the SCSI midlayer.
4473		 */
4474 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4475 this_device->expose_device = 0;
4476 else
4477 this_device->expose_device = 1;
4478
4479
4480
4481
4482
4483 if (this_device->physical_device && this_device->expose_device)
4484 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4485
4486 switch (this_device->devtype) {
4487 case TYPE_ROM:
4488			/*
4489			 * Plain CD/DVD-ROM devices are not exposed.  The only
4490			 * ROM-type device presented is the "One Button
4491			 * Disaster Recovery" (OBDR) tape device, which
4492			 * temporarily reports itself as a CD-ROM; is_OBDR is
4493			 * set from the inquiry data by hpsa_update_device_info().
4494			 */
4495 if (is_OBDR)
4496 ncurrent++;
4497 break;
4498 case TYPE_DISK:
4499 case TYPE_ZBC:
4500 if (this_device->physical_device) {
4501
4502
4503 this_device->offload_enabled = 0;
4504 hpsa_get_ioaccel_drive_info(h, this_device,
4505 physdev_list, phys_dev_index, id_phys);
4506 hpsa_get_path_info(this_device,
4507 physdev_list, phys_dev_index, id_phys);
4508 }
4509 ncurrent++;
4510 break;
4511 case TYPE_TAPE:
4512 case TYPE_MEDIUM_CHANGER:
4513 ncurrent++;
4514 break;
4515 case TYPE_ENCLOSURE:
4516 if (!this_device->external)
4517 hpsa_get_enclosure_info(h, lunaddrbytes,
4518 physdev_list, phys_dev_index,
4519 this_device);
4520 ncurrent++;
4521 break;
4522 case TYPE_RAID:
4523			/*
4524			 * Only the controller itself (the HBA lunid) is
4525			 * exposed as a RAID-class device; other RAID-class
4526			 * entries such as external array controllers are not.
4527			 */
4528 if (!is_hba_lunid(lunaddrbytes))
4529 break;
4530 ncurrent++;
4531 break;
4532 default:
4533 break;
4534 }
4535 if (ncurrent >= HPSA_MAX_DEVICES)
4536 break;
4537 }
4538
4539 if (h->sas_host == NULL) {
4540 int rc = 0;
4541
4542 rc = hpsa_add_sas_host(h);
4543 if (rc) {
4544 dev_warn(&h->pdev->dev,
4545 "Could not add sas host %d\n", rc);
4546 goto out;
4547 }
4548 }
4549
4550 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4551out:
4552 kfree(tmpdevice);
4553 for (i = 0; i < ndev_allocated; i++)
4554 kfree(currentsd[i]);
4555 kfree(currentsd);
4556 kfree(physdev_list);
4557 kfree(logdev_list);
4558 kfree(id_ctlr);
4559 kfree(id_phys);
4560}
4561
4562static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4563 struct scatterlist *sg)
4564{
4565 u64 addr64 = (u64) sg_dma_address(sg);
4566 unsigned int len = sg_dma_len(sg);
4567
4568 desc->Addr = cpu_to_le64(addr64);
4569 desc->Len = cpu_to_le32(len);
4570 desc->Ext = 0;
4571}
4572
4573/*
4574 * hpsa_scatter_gather takes a struct scsi_cmnd, does the DMA mapping,
4575 * and fills in the scatter-gather entries of the hpsa command, cp,
4576 * chaining to a separate SG block when the list is too long to fit.
4577 */
4578static int hpsa_scatter_gather(struct ctlr_info *h,
4579 struct CommandList *cp,
4580 struct scsi_cmnd *cmd)
4581{
4582 struct scatterlist *sg;
4583 int use_sg, i, sg_limit, chained, last_sg;
4584 struct SGDescriptor *curr_sg;
4585
4586 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4587
4588 use_sg = scsi_dma_map(cmd);
4589 if (use_sg < 0)
4590 return use_sg;
4591
4592 if (!use_sg)
4593 goto sglist_finished;
4594
4595	/*
4596	 * If the number of entries is greater than the max for a single
4597	 * list, then we have a chained list: all but the last slot of the
4598	 * first list is filled here, and the remaining entries go into a
4599	 * separate chain block that the last descriptor will point to
4600	 * (see hpsa_map_sg_chain_block() below).
4601	 */
4602 curr_sg = cp->SG;
4603 chained = use_sg > h->max_cmd_sg_entries;
4604 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4605 last_sg = scsi_sg_count(cmd) - 1;
4606 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4607 hpsa_set_sg_descriptor(curr_sg, sg);
4608 curr_sg++;
4609 }
4610
4611 if (chained) {
4612		/*
4613		 * Continue with the chained list: point curr_sg at this
4614		 * command's chain block, set the limit to the remaining
4615		 * entry count, and resume the walk at the first scatterlist
4616		 * entry that has not been copied yet.
4617		 */
4618 curr_sg = h->cmd_sg_list[cp->cmdindex];
4619 sg_limit = use_sg - sg_limit;
4620 for_each_sg(sg, sg, sg_limit, i) {
4621 hpsa_set_sg_descriptor(curr_sg, sg);
4622 curr_sg++;
4623 }
4624 }
4625
4626
4627 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4628
4629 if (use_sg + chained > h->maxSG)
4630 h->maxSG = use_sg + chained;
4631
4632 if (chained) {
4633 cp->Header.SGList = h->max_cmd_sg_entries;
4634 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4635 if (hpsa_map_sg_chain_block(h, cp)) {
4636 scsi_dma_unmap(cmd);
4637 return -1;
4638 }
4639 return 0;
4640 }
4641
4642sglist_finished:
4643
4644 cp->Header.SGList = (u8) use_sg;
4645 cp->Header.SGTotal = cpu_to_le16(use_sg);
4646 return 0;
4647}
4648
4649static inline void warn_zero_length_transfer(struct ctlr_info *h,
4650 u8 *cdb, int cdb_len,
4651 const char *func)
4652{
4653 dev_warn(&h->pdev->dev,
4654 "%s: Blocking zero-length request: CDB:%*phN\n",
4655 func, cdb_len, cdb);
4656}
4657
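/*
 * Note for readers (derived from the submit paths below, not from any
 * firmware documentation): a return of IO_ACCEL_INELIGIBLE from the
 * ioaccel submit routines tells the caller that the request cannot use
 * the accelerated path; hpsa_scsi_queue_command() then falls back to the
 * normal RAID path via hpsa_ciss_submit().
 */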
4658#define IO_ACCEL_INELIGIBLE 1
4659
4660static bool is_zero_length_transfer(u8 *cdb)
4661{
4662 u32 block_cnt;
4663
4664
4665 switch (cdb[0]) {
4666 case READ_10:
4667 case WRITE_10:
4668 case VERIFY:
4669 case WRITE_VERIFY:
4670 block_cnt = get_unaligned_be16(&cdb[7]);
4671 break;
4672 case READ_12:
4673 case WRITE_12:
4674 case VERIFY_12:
4675 case WRITE_VERIFY_12:
4676 block_cnt = get_unaligned_be32(&cdb[6]);
4677 break;
4678 case READ_16:
4679 case WRITE_16:
4680 case VERIFY_16:
4681 block_cnt = get_unaligned_be32(&cdb[10]);
4682 break;
4683 default:
4684 return false;
4685 }
4686
4687 return block_cnt == 0;
4688}
4689
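/*
 * Illustrative example of the CDB fixup below (numbers chosen here for
 * clarity, not taken from any spec): a READ(6) with LBA 0x12345 and a
 * transfer length byte of 0 (meaning 256 blocks) is rewritten as a
 * READ(10) with cdb[2..5] = 00 01 23 45 and cdb[7..8] = 01 00.  Requests
 * whose block count cannot fit in the 16-bit READ/WRITE(10) length field
 * are rejected as IO_ACCEL_INELIGIBLE instead.
 */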
4690static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4691{
4692 int is_write = 0;
4693 u32 block;
4694 u32 block_cnt;
4695
4696
4697 switch (cdb[0]) {
4698 case WRITE_6:
4699 case WRITE_12:
4700 is_write = 1;
4701 fallthrough;
4702 case READ_6:
4703 case READ_12:
4704 if (*cdb_len == 6) {
4705 block = (((cdb[1] & 0x1F) << 16) |
4706 (cdb[2] << 8) |
4707 cdb[3]);
4708 block_cnt = cdb[4];
4709 if (block_cnt == 0)
4710 block_cnt = 256;
4711 } else {
4712 BUG_ON(*cdb_len != 12);
4713 block = get_unaligned_be32(&cdb[2]);
4714 block_cnt = get_unaligned_be32(&cdb[6]);
4715 }
4716 if (block_cnt > 0xffff)
4717 return IO_ACCEL_INELIGIBLE;
4718
4719 cdb[0] = is_write ? WRITE_10 : READ_10;
4720 cdb[1] = 0;
4721 cdb[2] = (u8) (block >> 24);
4722 cdb[3] = (u8) (block >> 16);
4723 cdb[4] = (u8) (block >> 8);
4724 cdb[5] = (u8) (block);
4725 cdb[6] = 0;
4726 cdb[7] = (u8) (block_cnt >> 8);
4727 cdb[8] = (u8) (block_cnt);
4728 cdb[9] = 0;
4729 *cdb_len = 10;
4730 break;
4731 }
4732 return 0;
4733}
4734
4735static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4736 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4737 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4738{
4739 struct scsi_cmnd *cmd = c->scsi_cmd;
4740 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4741 unsigned int len;
4742 unsigned int total_len = 0;
4743 struct scatterlist *sg;
4744 u64 addr64;
4745 int use_sg, i;
4746 struct SGDescriptor *curr_sg;
4747 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4748
4749
4750 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4751 atomic_dec(&phys_disk->ioaccel_cmds_out);
4752 return IO_ACCEL_INELIGIBLE;
4753 }
4754
4755 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4756
4757 if (is_zero_length_transfer(cdb)) {
4758 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4759 atomic_dec(&phys_disk->ioaccel_cmds_out);
4760 return IO_ACCEL_INELIGIBLE;
4761 }
4762
4763 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4764 atomic_dec(&phys_disk->ioaccel_cmds_out);
4765 return IO_ACCEL_INELIGIBLE;
4766 }
4767
4768 c->cmd_type = CMD_IOACCEL1;
4769
4770
4771 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4772 (c->cmdindex * sizeof(*cp));
4773 BUG_ON(c->busaddr & 0x0000007F);
4774
4775 use_sg = scsi_dma_map(cmd);
4776 if (use_sg < 0) {
4777 atomic_dec(&phys_disk->ioaccel_cmds_out);
4778 return use_sg;
4779 }
4780
4781 if (use_sg) {
4782 curr_sg = cp->SG;
4783 scsi_for_each_sg(cmd, sg, use_sg, i) {
4784 addr64 = (u64) sg_dma_address(sg);
4785 len = sg_dma_len(sg);
4786 total_len += len;
4787 curr_sg->Addr = cpu_to_le64(addr64);
4788 curr_sg->Len = cpu_to_le32(len);
4789 curr_sg->Ext = cpu_to_le32(0);
4790 curr_sg++;
4791 }
4792 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4793
4794 switch (cmd->sc_data_direction) {
4795 case DMA_TO_DEVICE:
4796 control |= IOACCEL1_CONTROL_DATA_OUT;
4797 break;
4798 case DMA_FROM_DEVICE:
4799 control |= IOACCEL1_CONTROL_DATA_IN;
4800 break;
4801 case DMA_NONE:
4802 control |= IOACCEL1_CONTROL_NODATAXFER;
4803 break;
4804 default:
4805 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4806 cmd->sc_data_direction);
4807 BUG();
4808 break;
4809 }
4810 } else {
4811 control |= IOACCEL1_CONTROL_NODATAXFER;
4812 }
4813
4814 c->Header.SGList = use_sg;
4815
4816 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4817 cp->transfer_len = cpu_to_le32(total_len);
4818 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4819 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4820 cp->control = cpu_to_le32(control);
4821 memcpy(cp->CDB, cdb, cdb_len);
4822 memcpy(cp->CISS_LUN, scsi3addr, 8);
4823
4824 enqueue_cmd_and_start_io(h, c);
4825 return 0;
4826}
4827
4828/*
4829 * Queue a command directly to a physical device behind the controller
4830 * over the I/O accelerator path, bypassing the firmware RAID stack.
4831 */
4832static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4833 struct CommandList *c)
4834{
4835 struct scsi_cmnd *cmd = c->scsi_cmd;
4836 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4837
4838 if (!dev)
4839 return -1;
4840
4841 c->phys_disk = dev;
4842
4843 if (dev->in_reset)
4844 return -1;
4845
4846 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4847 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4848}
4849
4850/*
4851 * Set encryption parameters (key index and tweak) for an ioaccel2 request.
4852 */
4853static void set_encrypt_ioaccel2(struct ctlr_info *h,
4854 struct CommandList *c, struct io_accel2_cmd *cp)
4855{
4856 struct scsi_cmnd *cmd = c->scsi_cmd;
4857 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4858 struct raid_map_data *map = &dev->raid_map;
4859 u64 first_block;
4860
4861
4862 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4863 return;
4864
4865 cp->dekindex = map->dekindex;
4866
4867
4868 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4869
4870
4871
4872
4873
4874 switch (cmd->cmnd[0]) {
4875
4876 case READ_6:
4877 case WRITE_6:
4878 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4879 (cmd->cmnd[2] << 8) |
4880 cmd->cmnd[3]);
4881 break;
4882 case WRITE_10:
4883 case READ_10:
4884
4885 case WRITE_12:
4886 case READ_12:
4887 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4888 break;
4889 case WRITE_16:
4890 case READ_16:
4891 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4892 break;
4893 default:
4894 dev_err(&h->pdev->dev,
4895 "ERROR: %s: size (0x%x) not supported for encryption\n",
4896 __func__, cmd->cmnd[0]);
4897 BUG();
4898 break;
4899 }
4900
4901 if (le32_to_cpu(map->volume_blk_size) != 512)
4902 first_block = first_block *
4903 le32_to_cpu(map->volume_blk_size)/512;
4904
4905 cp->tweak_lower = cpu_to_le32(first_block);
4906 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4907}
4908
4909static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4910 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4911 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4912{
4913 struct scsi_cmnd *cmd = c->scsi_cmd;
4914 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4915 struct ioaccel2_sg_element *curr_sg;
4916 int use_sg, i;
4917 struct scatterlist *sg;
4918 u64 addr64;
4919 u32 len;
4920 u32 total_len = 0;
4921
4922 if (!cmd->device)
4923 return -1;
4924
4925 if (!cmd->device->hostdata)
4926 return -1;
4927
4928 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4929
4930 if (is_zero_length_transfer(cdb)) {
4931 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4932 atomic_dec(&phys_disk->ioaccel_cmds_out);
4933 return IO_ACCEL_INELIGIBLE;
4934 }
4935
4936 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4937 atomic_dec(&phys_disk->ioaccel_cmds_out);
4938 return IO_ACCEL_INELIGIBLE;
4939 }
4940
4941 c->cmd_type = CMD_IOACCEL2;
4942
4943 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4944 (c->cmdindex * sizeof(*cp));
4945 BUG_ON(c->busaddr & 0x0000007F);
4946
4947 memset(cp, 0, sizeof(*cp));
4948 cp->IU_type = IOACCEL2_IU_TYPE;
4949
4950 use_sg = scsi_dma_map(cmd);
4951 if (use_sg < 0) {
4952 atomic_dec(&phys_disk->ioaccel_cmds_out);
4953 return use_sg;
4954 }
4955
4956 if (use_sg) {
4957 curr_sg = cp->sg;
4958 if (use_sg > h->ioaccel_maxsg) {
4959 addr64 = le64_to_cpu(
4960 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4961 curr_sg->address = cpu_to_le64(addr64);
4962 curr_sg->length = 0;
4963 curr_sg->reserved[0] = 0;
4964 curr_sg->reserved[1] = 0;
4965 curr_sg->reserved[2] = 0;
4966 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4967
4968 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4969 }
4970 scsi_for_each_sg(cmd, sg, use_sg, i) {
4971 addr64 = (u64) sg_dma_address(sg);
4972 len = sg_dma_len(sg);
4973 total_len += len;
4974 curr_sg->address = cpu_to_le64(addr64);
4975 curr_sg->length = cpu_to_le32(len);
4976 curr_sg->reserved[0] = 0;
4977 curr_sg->reserved[1] = 0;
4978 curr_sg->reserved[2] = 0;
4979 curr_sg->chain_indicator = 0;
4980 curr_sg++;
4981 }
4982
4983
4984
4985
4986 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4987
4988 switch (cmd->sc_data_direction) {
4989 case DMA_TO_DEVICE:
4990 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4991 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4992 break;
4993 case DMA_FROM_DEVICE:
4994 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4995 cp->direction |= IOACCEL2_DIR_DATA_IN;
4996 break;
4997 case DMA_NONE:
4998 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4999 cp->direction |= IOACCEL2_DIR_NO_DATA;
5000 break;
5001 default:
5002 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5003 cmd->sc_data_direction);
5004 BUG();
5005 break;
5006 }
5007 } else {
5008 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
5009 cp->direction |= IOACCEL2_DIR_NO_DATA;
5010 }
5011
5012
5013 set_encrypt_ioaccel2(h, c, cp);
5014
5015 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
5016 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5017 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
5018
5019 cp->data_len = cpu_to_le32(total_len);
5020 cp->err_ptr = cpu_to_le64(c->busaddr +
5021 offsetof(struct io_accel2_cmd, error_data));
5022 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
5023
5024
5025 if (use_sg > h->ioaccel_maxsg) {
5026 cp->sg_count = 1;
5027 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
5028 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
5029 atomic_dec(&phys_disk->ioaccel_cmds_out);
5030 scsi_dma_unmap(cmd);
5031 return -1;
5032 }
5033 } else
5034 cp->sg_count = (u8) use_sg;
5035
5036 if (phys_disk->in_reset) {
5037 cmd->result = DID_RESET << 16;
5038 return -1;
5039 }
5040
5041 enqueue_cmd_and_start_io(h, c);
5042 return 0;
5043}
5044
5045/* Queue the command to the right I/O accelerator path (ioaccel1 or
5046 * ioaccel2) based on the controller's configured transport method.
5047 */
5048static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5049 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5050 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5051{
5052 if (!c->scsi_cmd->device)
5053 return -1;
5054
5055 if (!c->scsi_cmd->device->hostdata)
5056 return -1;
5057
5058 if (phys_disk->in_reset)
5059 return -1;
5060
5061	/* Try to honor the device's queue depth */
5062 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5063 phys_disk->queue_depth) {
5064 atomic_dec(&phys_disk->ioaccel_cmds_out);
5065 return IO_ACCEL_INELIGIBLE;
5066 }
5067 if (h->transMethod & CFGTBL_Trans_io_accel1)
5068 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5069 cdb, cdb_len, scsi3addr,
5070 phys_disk);
5071 else
5072 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5073 cdb, cdb_len, scsi3addr,
5074 phys_disk);
5075}
5076
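/*
 * Sketch of the mirror-group selection done by raid_map_helper() (derived
 * from the code, not from controller documentation): map_index initially
 * names a data drive in mirror group 0; the helper advances it by
 * data_disks_per_row per group until it lands in the group selected by
 * offload_to_mirror, so successive requests are spread across the mirror
 * copies of the volume.
 */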
5077static void raid_map_helper(struct raid_map_data *map,
5078 int offload_to_mirror, u32 *map_index, u32 *current_group)
5079{
5080 if (offload_to_mirror == 0) {
5081
5082 *map_index %= le16_to_cpu(map->data_disks_per_row);
5083 return;
5084 }
5085 do {
5086
5087 *current_group = *map_index /
5088 le16_to_cpu(map->data_disks_per_row);
5089 if (offload_to_mirror == *current_group)
5090 continue;
5091 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5092
5093 *map_index += le16_to_cpu(map->data_disks_per_row);
5094 (*current_group)++;
5095 } else {
5096
5097 *map_index %= le16_to_cpu(map->data_disks_per_row);
5098 *current_group = 0;
5099 }
5100 } while (offload_to_mirror != *current_group);
5101}
5102
5103/* Attempt I/O accelerator mode for a logical-volume request: use the
5104 * volume's RAID map to address the underlying physical disk directly,
5105 * bypassing the firmware RAID stack where possible. */
5106static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5107 struct CommandList *c)
5108{
5109 struct scsi_cmnd *cmd = c->scsi_cmd;
5110 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5111 struct raid_map_data *map = &dev->raid_map;
5112 struct raid_map_disk_data *dd = &map->data[0];
5113 int is_write = 0;
5114 u32 map_index;
5115 u64 first_block, last_block;
5116 u32 block_cnt;
5117 u32 blocks_per_row;
5118 u64 first_row, last_row;
5119 u32 first_row_offset, last_row_offset;
5120 u32 first_column, last_column;
5121 u64 r0_first_row, r0_last_row;
5122 u32 r5or6_blocks_per_row;
5123 u64 r5or6_first_row, r5or6_last_row;
5124 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5125 u32 r5or6_first_column, r5or6_last_column;
5126 u32 total_disks_per_row;
5127 u32 stripesize;
5128 u32 first_group, last_group, current_group;
5129 u32 map_row;
5130 u32 disk_handle;
5131 u64 disk_block;
5132 u32 disk_block_cnt;
5133 u8 cdb[16];
5134 u8 cdb_len;
5135 u16 strip_size;
5136#if BITS_PER_LONG == 32
5137 u64 tmpdiv;
5138#endif
5139 int offload_to_mirror;
5140
5141 if (!dev)
5142 return -1;
5143
5144 if (dev->in_reset)
5145 return -1;
5146
5147
5148 switch (cmd->cmnd[0]) {
5149 case WRITE_6:
5150 is_write = 1;
5151 fallthrough;
5152 case READ_6:
5153 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5154 (cmd->cmnd[2] << 8) |
5155 cmd->cmnd[3]);
5156 block_cnt = cmd->cmnd[4];
5157 if (block_cnt == 0)
5158 block_cnt = 256;
5159 break;
5160 case WRITE_10:
5161 is_write = 1;
5162 fallthrough;
5163 case READ_10:
5164 first_block =
5165 (((u64) cmd->cmnd[2]) << 24) |
5166 (((u64) cmd->cmnd[3]) << 16) |
5167 (((u64) cmd->cmnd[4]) << 8) |
5168 cmd->cmnd[5];
5169 block_cnt =
5170 (((u32) cmd->cmnd[7]) << 8) |
5171 cmd->cmnd[8];
5172 break;
5173 case WRITE_12:
5174 is_write = 1;
5175 fallthrough;
5176 case READ_12:
5177 first_block =
5178 (((u64) cmd->cmnd[2]) << 24) |
5179 (((u64) cmd->cmnd[3]) << 16) |
5180 (((u64) cmd->cmnd[4]) << 8) |
5181 cmd->cmnd[5];
5182 block_cnt =
5183 (((u32) cmd->cmnd[6]) << 24) |
5184 (((u32) cmd->cmnd[7]) << 16) |
5185 (((u32) cmd->cmnd[8]) << 8) |
5186 cmd->cmnd[9];
5187 break;
5188 case WRITE_16:
5189 is_write = 1;
5190 fallthrough;
5191 case READ_16:
5192 first_block =
5193 (((u64) cmd->cmnd[2]) << 56) |
5194 (((u64) cmd->cmnd[3]) << 48) |
5195 (((u64) cmd->cmnd[4]) << 40) |
5196 (((u64) cmd->cmnd[5]) << 32) |
5197 (((u64) cmd->cmnd[6]) << 24) |
5198 (((u64) cmd->cmnd[7]) << 16) |
5199 (((u64) cmd->cmnd[8]) << 8) |
5200 cmd->cmnd[9];
5201 block_cnt =
5202 (((u32) cmd->cmnd[10]) << 24) |
5203 (((u32) cmd->cmnd[11]) << 16) |
5204 (((u32) cmd->cmnd[12]) << 8) |
5205 cmd->cmnd[13];
5206 break;
5207 default:
5208 return IO_ACCEL_INELIGIBLE;
5209 }
5210 last_block = first_block + block_cnt - 1;
5211	/* Accelerated writes are only attempted on RAID 0 volumes. */
5212
5213 if (is_write && dev->raid_level != 0)
5214 return IO_ACCEL_INELIGIBLE;
5215	/* Reject requests that run past the end of the volume or wrap. */
5216
5217 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5218 last_block < first_block)
5219 return IO_ACCEL_INELIGIBLE;
5220	/* Calculate the stripe geometry: row, offset within the row, and
5221	 * column (data disk) for the first and last block. */
5222 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5223 le16_to_cpu(map->strip_size);
5224 strip_size = le16_to_cpu(map->strip_size);
5225#if BITS_PER_LONG == 32
5226 tmpdiv = first_block;
5227 (void) do_div(tmpdiv, blocks_per_row);
5228 first_row = tmpdiv;
5229 tmpdiv = last_block;
5230 (void) do_div(tmpdiv, blocks_per_row);
5231 last_row = tmpdiv;
5232 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5233 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5234 tmpdiv = first_row_offset;
5235 (void) do_div(tmpdiv, strip_size);
5236 first_column = tmpdiv;
5237 tmpdiv = last_row_offset;
5238 (void) do_div(tmpdiv, strip_size);
5239 last_column = tmpdiv;
5240#else
5241 first_row = first_block / blocks_per_row;
5242 last_row = last_block / blocks_per_row;
5243 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5244 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5245 first_column = first_row_offset / strip_size;
5246 last_column = last_row_offset / strip_size;
5247#endif
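	/*
	 * Illustrative numbers for the math above (not from firmware docs):
	 * with data_disks_per_row = 3 and strip_size = 128, blocks_per_row
	 * is 384; a request starting at block 1000 falls in first_row = 2
	 * (1000 / 384), first_row_offset = 232, first_column = 1 (232 / 128).
	 */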
5248	/* The request must fall within a single row and column (one strip
5249	 * on one disk) to be eligible for acceleration. */
5250 if ((first_row != last_row) || (first_column != last_column))
5251 return IO_ACCEL_INELIGIBLE;
5252
5253
5254 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5255 le16_to_cpu(map->metadata_disks_per_row);
5256 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5257 le16_to_cpu(map->row_cnt);
5258 map_index = (map_row * total_disks_per_row) + first_column;
5259
5260 switch (dev->raid_level) {
5261 case HPSA_RAID_0:
5262 break;
5263 case HPSA_RAID_1:
5264
5265
5266
5267
5268
5269 if (le16_to_cpu(map->layout_map_count) != 2) {
5270 hpsa_turn_off_ioaccel_for_device(dev);
5271 return IO_ACCEL_INELIGIBLE;
5272 }
5273 if (dev->offload_to_mirror)
5274 map_index += le16_to_cpu(map->data_disks_per_row);
5275 dev->offload_to_mirror = !dev->offload_to_mirror;
5276 break;
5277 case HPSA_RAID_ADM:
5278
5279
5280
5281
5282 if (le16_to_cpu(map->layout_map_count) != 3) {
5283 hpsa_turn_off_ioaccel_for_device(dev);
5284 return IO_ACCEL_INELIGIBLE;
5285 }
5286
5287 offload_to_mirror = dev->offload_to_mirror;
5288 raid_map_helper(map, offload_to_mirror,
5289				&map_index, &current_group);
5290
5291 offload_to_mirror =
5292 (offload_to_mirror >=
5293 le16_to_cpu(map->layout_map_count) - 1)
5294 ? 0 : offload_to_mirror + 1;
5295 dev->offload_to_mirror = offload_to_mirror;
5296
5297
5298
5299
5300 break;
5301 case HPSA_RAID_5:
5302 case HPSA_RAID_6:
5303 if (le16_to_cpu(map->layout_map_count) <= 1)
5304 break;
5305
5306
5307 r5or6_blocks_per_row =
5308 le16_to_cpu(map->strip_size) *
5309 le16_to_cpu(map->data_disks_per_row);
5310 if (r5or6_blocks_per_row == 0) {
5311 hpsa_turn_off_ioaccel_for_device(dev);
5312 return IO_ACCEL_INELIGIBLE;
5313 }
5314 stripesize = r5or6_blocks_per_row *
5315 le16_to_cpu(map->layout_map_count);
5316#if BITS_PER_LONG == 32
5317 tmpdiv = first_block;
5318 first_group = do_div(tmpdiv, stripesize);
5319 tmpdiv = first_group;
5320 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5321 first_group = tmpdiv;
5322 tmpdiv = last_block;
5323 last_group = do_div(tmpdiv, stripesize);
5324 tmpdiv = last_group;
5325 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5326 last_group = tmpdiv;
5327#else
5328 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5329 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5330#endif
5331 if (first_group != last_group)
5332 return IO_ACCEL_INELIGIBLE;
5333
5334
5335#if BITS_PER_LONG == 32
5336 tmpdiv = first_block;
5337 (void) do_div(tmpdiv, stripesize);
5338 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5339 tmpdiv = last_block;
5340 (void) do_div(tmpdiv, stripesize);
5341 r5or6_last_row = r0_last_row = tmpdiv;
5342#else
5343 first_row = r5or6_first_row = r0_first_row =
5344 first_block / stripesize;
5345 r5or6_last_row = r0_last_row = last_block / stripesize;
5346#endif
5347 if (r5or6_first_row != r5or6_last_row)
5348 return IO_ACCEL_INELIGIBLE;
5349
5350
5351
5352#if BITS_PER_LONG == 32
5353 tmpdiv = first_block;
5354 first_row_offset = do_div(tmpdiv, stripesize);
5355 tmpdiv = first_row_offset;
5356 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5357 r5or6_first_row_offset = first_row_offset;
5358 tmpdiv = last_block;
5359 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5360 tmpdiv = r5or6_last_row_offset;
5361 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5362 tmpdiv = r5or6_first_row_offset;
5363 (void) do_div(tmpdiv, map->strip_size);
5364 first_column = r5or6_first_column = tmpdiv;
5365 tmpdiv = r5or6_last_row_offset;
5366 (void) do_div(tmpdiv, map->strip_size);
5367 r5or6_last_column = tmpdiv;
5368#else
5369 first_row_offset = r5or6_first_row_offset =
5370 (u32)((first_block % stripesize) %
5371 r5or6_blocks_per_row);
5372
5373 r5or6_last_row_offset =
5374 (u32)((last_block % stripesize) %
5375 r5or6_blocks_per_row);
5376
5377 first_column = r5or6_first_column =
5378 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5379 r5or6_last_column =
5380 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5381#endif
5382 if (r5or6_first_column != r5or6_last_column)
5383 return IO_ACCEL_INELIGIBLE;
5384
5385
5386 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5387 le16_to_cpu(map->row_cnt);
5388
5389 map_index = (first_group *
5390 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5391 (map_row * total_disks_per_row) + first_column;
5392 break;
5393 default:
5394 return IO_ACCEL_INELIGIBLE;
5395 }
5396
5397 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5398 return IO_ACCEL_INELIGIBLE;
5399
5400 c->phys_disk = dev->phys_disk[map_index];
5401 if (!c->phys_disk)
5402 return IO_ACCEL_INELIGIBLE;
5403
5404 disk_handle = dd[map_index].ioaccel_handle;
5405 disk_block = le64_to_cpu(map->disk_starting_blk) +
5406 first_row * le16_to_cpu(map->strip_size) +
5407 (first_row_offset - first_column *
5408 le16_to_cpu(map->strip_size));
5409 disk_block_cnt = block_cnt;
5410
5411
5412 if (map->phys_blk_shift) {
5413 disk_block <<= map->phys_blk_shift;
5414 disk_block_cnt <<= map->phys_blk_shift;
5415 }
5416 BUG_ON(disk_block_cnt > 0xffff);
5417	/* Build the CDB for the physical disk: use READ/WRITE(16) when the
5418	 * target LBA does not fit in the 32-bit READ/WRITE(10) field. */
5419 if (disk_block > 0xffffffff) {
5420 cdb[0] = is_write ? WRITE_16 : READ_16;
5421 cdb[1] = 0;
5422 cdb[2] = (u8) (disk_block >> 56);
5423 cdb[3] = (u8) (disk_block >> 48);
5424 cdb[4] = (u8) (disk_block >> 40);
5425 cdb[5] = (u8) (disk_block >> 32);
5426 cdb[6] = (u8) (disk_block >> 24);
5427 cdb[7] = (u8) (disk_block >> 16);
5428 cdb[8] = (u8) (disk_block >> 8);
5429 cdb[9] = (u8) (disk_block);
5430 cdb[10] = (u8) (disk_block_cnt >> 24);
5431 cdb[11] = (u8) (disk_block_cnt >> 16);
5432 cdb[12] = (u8) (disk_block_cnt >> 8);
5433 cdb[13] = (u8) (disk_block_cnt);
5434 cdb[14] = 0;
5435 cdb[15] = 0;
5436 cdb_len = 16;
5437 } else {
5438 cdb[0] = is_write ? WRITE_10 : READ_10;
5439 cdb[1] = 0;
5440 cdb[2] = (u8) (disk_block >> 24);
5441 cdb[3] = (u8) (disk_block >> 16);
5442 cdb[4] = (u8) (disk_block >> 8);
5443 cdb[5] = (u8) (disk_block);
5444 cdb[6] = 0;
5445 cdb[7] = (u8) (disk_block_cnt >> 8);
5446 cdb[8] = (u8) (disk_block_cnt);
5447 cdb[9] = 0;
5448 cdb_len = 10;
5449 }
5450 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5451 dev->scsi3addr,
5452 dev->phys_disk[map_index]);
5453}
5454
5455/*
5456 * Submit a command to the controller over the standard (non-accelerated)
5457 * CISS path: fill in the request header and CDB, build the scatter-gather
5458 * list, and start the I/O.
5459 */
5460static int hpsa_ciss_submit(struct ctlr_info *h,
5461 struct CommandList *c, struct scsi_cmnd *cmd,
5462 struct hpsa_scsi_dev_t *dev)
5463{
5464 cmd->host_scribble = (unsigned char *) c;
5465 c->cmd_type = CMD_SCSI;
5466 c->scsi_cmd = cmd;
5467 c->Header.ReplyQueue = 0;
5468 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
5469 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5470
5471
5472
5473 c->Request.Timeout = 0;
5474 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5475 c->Request.CDBLen = cmd->cmd_len;
5476 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5477 switch (cmd->sc_data_direction) {
5478 case DMA_TO_DEVICE:
5479 c->Request.type_attr_dir =
5480 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5481 break;
5482 case DMA_FROM_DEVICE:
5483 c->Request.type_attr_dir =
5484 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5485 break;
5486 case DMA_NONE:
5487 c->Request.type_attr_dir =
5488 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5489 break;
5490 case DMA_BIDIRECTIONAL:
5491		/*
5492		 * Bidirectional transfers are not expected from the block
5493		 * layer and should only show up via passthrough users.
5494		 * Mark the direction as reserved and let the controller
5495		 * sort it out. */
5496 c->Request.type_attr_dir =
5497 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5498
5499
5500
5501
5502
5503
5504
5505
5506 break;
5507
5508 default:
5509 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5510 cmd->sc_data_direction);
5511 BUG();
5512 break;
5513 }
5514
5515 if (hpsa_scatter_gather(h, c, cmd) < 0) {
5516 hpsa_cmd_resolve_and_free(h, c);
5517 return SCSI_MLQUEUE_HOST_BUSY;
5518 }
5519
5520 if (dev->in_reset) {
5521 hpsa_cmd_resolve_and_free(h, c);
5522 return SCSI_MLQUEUE_HOST_BUSY;
5523 }
5524
5525 c->device = dev;
5526
5527 enqueue_cmd_and_start_io(h, c);
5528
5529 return 0;
5530}
5531
5532static void hpsa_cmd_init(struct ctlr_info *h, int index,
5533 struct CommandList *c)
5534{
5535 dma_addr_t cmd_dma_handle, err_dma_handle;
5536
5537
5538 memset(c, 0, offsetof(struct CommandList, refcount));
5539 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5540 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5541 c->err_info = h->errinfo_pool + index;
5542 memset(c->err_info, 0, sizeof(*c->err_info));
5543 err_dma_handle = h->errinfo_pool_dhandle
5544 + index * sizeof(*c->err_info);
5545 c->cmdindex = index;
5546 c->busaddr = (u32) cmd_dma_handle;
5547 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5548 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5549 c->h = h;
5550 c->scsi_cmd = SCSI_CMD_IDLE;
5551}
5552
5553static void hpsa_preinitialize_commands(struct ctlr_info *h)
5554{
5555 int i;
5556
5557 for (i = 0; i < h->nr_cmds; i++) {
5558 struct CommandList *c = h->cmd_pool + i;
5559
5560 hpsa_cmd_init(h, i, c);
5561 atomic_set(&c->refcount, 0);
5562 }
5563}
5564
5565static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5566 struct CommandList *c)
5567{
5568 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5569
5570 BUG_ON(c->cmdindex != index);
5571
5572 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5573 memset(c->err_info, 0, sizeof(*c->err_info));
5574 c->busaddr = (u32) cmd_dma_handle;
5575}
5576
5577static int hpsa_ioaccel_submit(struct ctlr_info *h,
5578 struct CommandList *c, struct scsi_cmnd *cmd)
5579{
5580 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5581 int rc = IO_ACCEL_INELIGIBLE;
5582
5583 if (!dev)
5584 return SCSI_MLQUEUE_HOST_BUSY;
5585
5586 if (dev->in_reset)
5587 return SCSI_MLQUEUE_HOST_BUSY;
5588
5589 if (hpsa_simple_mode)
5590 return IO_ACCEL_INELIGIBLE;
5591
5592 cmd->host_scribble = (unsigned char *) c;
5593
5594 if (dev->offload_enabled) {
5595 hpsa_cmd_init(h, c->cmdindex, c);
5596 c->cmd_type = CMD_SCSI;
5597 c->scsi_cmd = cmd;
5598 c->device = dev;
5599 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5600 if (rc < 0)
5601 rc = SCSI_MLQUEUE_HOST_BUSY;
5602 } else if (dev->hba_ioaccel_enabled) {
5603 hpsa_cmd_init(h, c->cmdindex, c);
5604 c->cmd_type = CMD_SCSI;
5605 c->scsi_cmd = cmd;
5606 c->device = dev;
5607 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5608 if (rc < 0)
5609 rc = SCSI_MLQUEUE_HOST_BUSY;
5610 }
5611 return rc;
5612}
5613
5614static void hpsa_command_resubmit_worker(struct work_struct *work)
5615{
5616 struct scsi_cmnd *cmd;
5617 struct hpsa_scsi_dev_t *dev;
5618 struct CommandList *c = container_of(work, struct CommandList, work);
5619
5620 cmd = c->scsi_cmd;
5621 dev = cmd->device->hostdata;
5622 if (!dev) {
5623 cmd->result = DID_NO_CONNECT << 16;
5624 return hpsa_cmd_free_and_done(c->h, c, cmd);
5625 }
5626
5627 if (dev->in_reset) {
5628 cmd->result = DID_RESET << 16;
5629 return hpsa_cmd_free_and_done(c->h, c, cmd);
5630 }
5631
5632 if (c->cmd_type == CMD_IOACCEL2) {
5633 struct ctlr_info *h = c->h;
5634 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5635 int rc;
5636
5637 if (c2->error_data.serv_response ==
5638 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5639 rc = hpsa_ioaccel_submit(h, c, cmd);
5640 if (rc == 0)
5641 return;
5642 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5643				/*
5644				 * We cannot return "host busy" from a
5645				 * workqueue, so complete the command with
5646				 * DID_IMM_RETRY and let the midlayer retry.
5647				 */
5648 cmd->result = DID_IMM_RETRY << 16;
5649 return hpsa_cmd_free_and_done(h, c, cmd);
5650 }
5651
5652 }
5653 }
5654 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5655 if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5656		/*
5657		 * hpsa_ciss_submit() only fails here if the DMA mapping (or
5658		 * a racing reset) prevented the command from being issued,
5659		 * and it has already freed the command block in that case.
5660		 * As above, we cannot return "host busy" from a workqueue,
5661		 * so finish the command with DID_IMM_RETRY and let the
5662		 * midlayer resubmit it.
5663		 */
5664 cmd->result = DID_IMM_RETRY << 16;
5665 cmd->scsi_done(cmd);
5666 }
5667}
5668
5669
5670static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5671{
5672 struct ctlr_info *h;
5673 struct hpsa_scsi_dev_t *dev;
5674 struct CommandList *c;
5675 int rc = 0;
5676
5677
5678 h = sdev_to_hba(cmd->device);
5679
5680 BUG_ON(cmd->request->tag < 0);
5681
5682 dev = cmd->device->hostdata;
5683 if (!dev) {
5684 cmd->result = DID_NO_CONNECT << 16;
5685 cmd->scsi_done(cmd);
5686 return 0;
5687 }
5688
5689 if (dev->removed) {
5690 cmd->result = DID_NO_CONNECT << 16;
5691 cmd->scsi_done(cmd);
5692 return 0;
5693 }
5694
5695 if (unlikely(lockup_detected(h))) {
5696 cmd->result = DID_NO_CONNECT << 16;
5697 cmd->scsi_done(cmd);
5698 return 0;
5699 }
5700
5701 if (dev->in_reset)
5702 return SCSI_MLQUEUE_DEVICE_BUSY;
5703
5704 c = cmd_tagged_alloc(h, cmd);
5705 if (c == NULL)
5706 return SCSI_MLQUEUE_DEVICE_BUSY;
5707
5708	/*
5709	 * Clear any stale result: the midlayer does not reliably zero this
5710	 * field on retries, and the completion path expects it to start at 0.
5711	 */
5712 cmd->result = 0;
5713
5714	/*
5715	 * Try the I/O accelerator path first for ordinary first-try I/O;
5716	 * retries and passthrough requests always use the normal RAID path.
5717	 */
5718 if (likely(cmd->retries == 0 &&
5719 !blk_rq_is_passthrough(cmd->request) &&
5720 h->acciopath_status)) {
5721 rc = hpsa_ioaccel_submit(h, c, cmd);
5722 if (rc == 0)
5723 return 0;
5724 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5725 hpsa_cmd_resolve_and_free(h, c);
5726 return SCSI_MLQUEUE_HOST_BUSY;
5727 }
5728 }
5729 return hpsa_ciss_submit(h, c, cmd, dev);
5730}
5731
5732static void hpsa_scan_complete(struct ctlr_info *h)
5733{
5734 unsigned long flags;
5735
5736 spin_lock_irqsave(&h->scan_lock, flags);
5737 h->scan_finished = 1;
5738 wake_up(&h->scan_wait_queue);
5739 spin_unlock_irqrestore(&h->scan_lock, flags);
5740}
5741
5742static void hpsa_scan_start(struct Scsi_Host *sh)
5743{
5744 struct ctlr_info *h = shost_to_hba(sh);
5745 unsigned long flags;
5746
5747	/*
5748	 * Do not start a new rescan on a controller that is already known
5749	 * to be locked up; just report the scan as finished.  If the
5750	 * controller locks up during a rescan that thread is likely stuck,
5751	 * but at least no new scan threads will pile up behind it.
5752	 */
5753 if (unlikely(lockup_detected(h)))
5754 return hpsa_scan_complete(h);
5755
5756
5757
5758
5759 spin_lock_irqsave(&h->scan_lock, flags);
5760 if (h->scan_waiting) {
5761 spin_unlock_irqrestore(&h->scan_lock, flags);
5762 return;
5763 }
5764
5765 spin_unlock_irqrestore(&h->scan_lock, flags);
5766
5767
5768 while (1) {
5769 spin_lock_irqsave(&h->scan_lock, flags);
5770 if (h->scan_finished)
5771 break;
5772 h->scan_waiting = 1;
5773 spin_unlock_irqrestore(&h->scan_lock, flags);
5774 wait_event(h->scan_wait_queue, h->scan_finished);
5775
5776
5777
5778
5779
5780 }
5781 h->scan_finished = 0;
5782 h->scan_waiting = 0;
5783 spin_unlock_irqrestore(&h->scan_lock, flags);
5784
5785 if (unlikely(lockup_detected(h)))
5786 return hpsa_scan_complete(h);
5787
5788
5789
5790
5791 spin_lock_irqsave(&h->reset_lock, flags);
5792 if (h->reset_in_progress) {
5793 h->drv_req_rescan = 1;
5794 spin_unlock_irqrestore(&h->reset_lock, flags);
5795 hpsa_scan_complete(h);
5796 return;
5797 }
5798 spin_unlock_irqrestore(&h->reset_lock, flags);
5799
5800 hpsa_update_scsi_devices(h);
5801
5802 hpsa_scan_complete(h);
5803}
5804
5805static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5806{
5807 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5808
5809 if (!logical_drive)
5810 return -ENODEV;
5811
5812 if (qdepth < 1)
5813 qdepth = 1;
5814 else if (qdepth > logical_drive->queue_depth)
5815 qdepth = logical_drive->queue_depth;
5816
5817 return scsi_change_queue_depth(sdev, qdepth);
5818}
5819
5820static int hpsa_scan_finished(struct Scsi_Host *sh,
5821 unsigned long elapsed_time)
5822{
5823 struct ctlr_info *h = shost_to_hba(sh);
5824 unsigned long flags;
5825 int finished;
5826
5827 spin_lock_irqsave(&h->scan_lock, flags);
5828 finished = h->scan_finished;
5829 spin_unlock_irqrestore(&h->scan_lock, flags);
5830 return finished;
5831}
5832
5833static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5834{
5835 struct Scsi_Host *sh;
5836
5837 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5838 if (sh == NULL) {
5839 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5840 return -ENOMEM;
5841 }
5842
5843 sh->io_port = 0;
5844 sh->n_io_port = 0;
5845 sh->this_id = -1;
5846 sh->max_channel = 3;
5847 sh->max_cmd_len = MAX_COMMAND_SIZE;
5848 sh->max_lun = HPSA_MAX_LUN;
5849 sh->max_id = HPSA_MAX_LUN;
5850 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5851 sh->cmd_per_lun = sh->can_queue;
5852 sh->sg_tablesize = h->maxsgentries;
5853 sh->transportt = hpsa_sas_transport_template;
5854 sh->hostdata[0] = (unsigned long) h;
5855 sh->irq = pci_irq_vector(h->pdev, 0);
5856 sh->unique_id = sh->irq;
5857
5858 h->scsi_host = sh;
5859 return 0;
5860}
5861
5862static int hpsa_scsi_add_host(struct ctlr_info *h)
5863{
5864 int rv;
5865
5866 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5867 if (rv) {
5868 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5869 return rv;
5870 }
5871 scsi_scan_host(h->scsi_host);
5872 return 0;
5873}
5874
5875/*
5876 * Map a SCSI command to the index of its CommandList entry.  Block layer
5877 * tags start at zero, but the first HPSA_NRESERVED_CMDS entries of the
5878 * command pool are kept for driver-internal commands, so midlayer
5879 * commands are offset past that reserved region.
5880 */
5881static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5882{
5883 int idx = scmd->request->tag;
5884
5885 if (idx < 0)
5886 return idx;
5887	/* Offset past the commands reserved for driver-internal use. */
5888
5889 return idx += HPSA_NRESERVED_CMDS;
5890}
5891
5892/*
5893 * Send a TEST_UNIT_READY and wait for completion.  Returns 0 if the
5894 * device is ready or reports only benign sense data, nonzero otherwise.
5895 */
5896static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5897 struct CommandList *c, unsigned char lunaddr[],
5898 int reply_queue)
5899{
5900 int rc;
5901
5902
5903 (void) fill_cmd(c, TEST_UNIT_READY, h,
5904 NULL, 0, 0, lunaddr, TYPE_CMD);
5905 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5906 if (rc)
5907 return rc;
5908
5909
5910
5911 if (c->err_info->CommandStatus == CMD_SUCCESS)
5912 return 0;
5913
5914
5915
5916
5917
5918
5919 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5920 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5921 (c->err_info->SenseInfo[2] == NO_SENSE ||
5922 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5923 return 0;
5924
5925 return 1;
5926}
5927
5928/*
5929 * Repeatedly send TEST_UNIT_READY, with exponential backoff between
5930 * attempts, until the device reports ready or the retry limit is hit.
5931 */
5932static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5933 struct CommandList *c,
5934 unsigned char lunaddr[], int reply_queue)
5935{
5936 int rc;
5937 int count = 0;
5938 int waittime = 1;
5939
5940
5941 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5942
5943
5944
5945
5946
5947 msleep(1000 * waittime);
5948
5949 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5950 if (!rc)
5951 break;
5952
5953
5954 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5955 waittime *= 2;
5956
5957 dev_warn(&h->pdev->dev,
5958 "waiting %d secs for device to become ready.\n",
5959 waittime);
5960 }
5961
5962 return rc;
5963}
5964
5965static int wait_for_device_to_become_ready(struct ctlr_info *h,
5966 unsigned char lunaddr[],
5967 int reply_queue)
5968{
5969 int first_queue;
5970 int last_queue;
5971 int rq;
5972 int rc = 0;
5973 struct CommandList *c;
5974
5975 c = cmd_alloc(h);
5976
5977
5978
5979
5980
5981
5982 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5983 first_queue = 0;
5984 last_queue = h->nreply_queues - 1;
5985 } else {
5986 first_queue = reply_queue;
5987 last_queue = reply_queue;
5988 }
5989
5990 for (rq = first_queue; rq <= last_queue; rq++) {
5991 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5992 if (rc)
5993 break;
5994 }
5995
5996 if (rc)
5997 dev_warn(&h->pdev->dev, "giving up on device.\n");
5998 else
5999 dev_warn(&h->pdev->dev, "device is ready.\n");
6000
6001 cmd_free(h, c);
6002 return rc;
6003}
6004
6005/* SCSI error-handler entry point: reset the logical volume or physical
6006 * target that the failed command was addressed to, after giving its
6007 * outstanding commands a chance to drain. */
6008static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
6009{
6010 int rc = SUCCESS;
6011 int i;
6012 struct ctlr_info *h;
6013 struct hpsa_scsi_dev_t *dev = NULL;
6014 u8 reset_type;
6015 char msg[48];
6016 unsigned long flags;
6017
6018
6019 h = sdev_to_hba(scsicmd->device);
6020 if (h == NULL)
6021 return FAILED;
6022
6023 spin_lock_irqsave(&h->reset_lock, flags);
6024 h->reset_in_progress = 1;
6025 spin_unlock_irqrestore(&h->reset_lock, flags);
6026
6027 if (lockup_detected(h)) {
6028 rc = FAILED;
6029 goto return_reset_status;
6030 }
6031
6032 dev = scsicmd->device->hostdata;
6033 if (!dev) {
6034 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
6035 rc = FAILED;
6036 goto return_reset_status;
6037 }
6038
6039 if (dev->devtype == TYPE_ENCLOSURE) {
6040 rc = SUCCESS;
6041 goto return_reset_status;
6042 }
6043
6044
6045 if (lockup_detected(h)) {
6046 snprintf(msg, sizeof(msg),
6047 "cmd %d RESET FAILED, lockup detected",
6048 hpsa_get_cmd_index(scsicmd));
6049 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6050 rc = FAILED;
6051 goto return_reset_status;
6052 }
6053
6054
6055 if (detect_controller_lockup(h)) {
6056 snprintf(msg, sizeof(msg),
6057 "cmd %d RESET FAILED, new lockup detected",
6058 hpsa_get_cmd_index(scsicmd));
6059 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6060 rc = FAILED;
6061 goto return_reset_status;
6062 }
6063
6064
6065 if (is_hba_lunid(dev->scsi3addr)) {
6066 rc = SUCCESS;
6067 goto return_reset_status;
6068 }
6069
6070 if (is_logical_dev_addr_mode(dev->scsi3addr))
6071 reset_type = HPSA_DEVICE_RESET_MSG;
6072 else
6073 reset_type = HPSA_PHYS_TARGET_RESET;
6074
6075 sprintf(msg, "resetting %s",
6076 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6077 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6078
6079
6080
6081
6082 dev->in_reset = true;
6083 for (i = 0; i < 10; i++) {
6084 if (atomic_read(&dev->commands_outstanding) > 0)
6085 msleep(1000);
6086 else
6087 break;
6088 }
6089
6090
6091 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6092 if (rc == 0)
6093 rc = SUCCESS;
6094 else
6095 rc = FAILED;
6096
6097 sprintf(msg, "reset %s %s",
6098 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6099 rc == SUCCESS ? "completed successfully" : "failed");
6100 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6101
6102return_reset_status:
6103 spin_lock_irqsave(&h->reset_lock, flags);
6104 h->reset_in_progress = 0;
6105 if (dev)
6106 dev->in_reset = false;
6107 spin_unlock_irqrestore(&h->reset_lock, flags);
6108 return rc;
6109}
6110
6111/*
6112 * For commands arriving from the SCSI midlayer, use the block layer tag
6113 * to pick the pre-assigned CommandList entry directly instead of
6114 * searching a free list.  If the chosen entry is somehow still busy,
6115 * the request is rejected (NULL) and gets retried by the midlayer.
6116 */
6117static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6118 struct scsi_cmnd *scmd)
6119{
6120 int idx = hpsa_get_cmd_index(scmd);
6121 struct CommandList *c = h->cmd_pool + idx;
6122
6123 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6124 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6125 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6126
6127
6128
6129 BUG();
6130 }
6131
6132 if (unlikely(!hpsa_is_cmd_idle(c))) {
6133		/*
6134		 * The SCSI midlayer is expected to hand us unique tags, so a
6135		 * command block that is still in use here indicates a tag
6136		 * collision.  Warn once per offending tag and reject the
6137		 * request; the caller returns "device busy" to force a retry.
6138		 */
6139 if (idx != h->last_collision_tag) {
6140 dev_warn(&h->pdev->dev,
6141 "%s: tag collision (tag=%d)\n", __func__, idx);
6142 if (scmd)
6143 scsi_print_command(scmd);
6144 h->last_collision_tag = idx;
6145 }
6146 return NULL;
6147 }
6148
6149 atomic_inc(&c->refcount);
6150
6151 hpsa_cmd_partial_init(h, idx, c);
6152 return c;
6153}
6154
6155static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6156{
6157	/*
6158	 * Just drop our reference; the block is addressed by its tag/index,
6159	 * so there is no free list to return it to.
6160	 */
6161 (void)atomic_dec(&c->refcount);
6162}
6163
6164/*
6165 * cmd_alloc() and cmd_free() manage the small pool of commands reserved
6166 * for driver-internal use (ioctls, rescans, resets), tracked with a
6167 * simple bitmap plus per-command reference counts.  This allocator never
6168 * returns NULL: if the reserved pool is momentarily exhausted it keeps
6169 * scanning until another thread frees an entry.  Commands issued on
6170 * behalf of the SCSI midlayer use cmd_tagged_alloc() above instead.
6171 */
6172
6173static struct CommandList *cmd_alloc(struct ctlr_info *h)
6174{
6175 struct CommandList *c;
6176 int refcount, i;
6177 int offset = 0;
6178
6179
6180
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198 for (;;) {
6199 i = find_next_zero_bit(h->cmd_pool_bits,
6200 HPSA_NRESERVED_CMDS,
6201 offset);
6202 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6203 offset = 0;
6204 continue;
6205 }
6206 c = h->cmd_pool + i;
6207 refcount = atomic_inc_return(&c->refcount);
6208 if (unlikely(refcount > 1)) {
6209 cmd_free(h, c);
6210 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6211 continue;
6212 }
6213 set_bit(i & (BITS_PER_LONG - 1),
6214 h->cmd_pool_bits + (i / BITS_PER_LONG));
6215 break;
6216 }
6217 hpsa_cmd_partial_init(h, i, c);
6218 c->device = NULL;
6219 return c;
6220}
6221
6222/*
6223 * Release a command obtained from cmd_alloc().  The bitmap bit is only
6224 * cleared when the last reference is dropped, so this is also safe for
6225 * blocks that came from cmd_tagged_alloc(), where dropping the
6226 * reference count is all that is required.
6227 */
6228static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6229{
6230 if (atomic_dec_and_test(&c->refcount)) {
6231 int i;
6232
6233 i = c - h->cmd_pool;
6234 clear_bit(i & (BITS_PER_LONG - 1),
6235 h->cmd_pool_bits + (i / BITS_PER_LONG));
6236 }
6237}
6238
6239#ifdef CONFIG_COMPAT
6240
6241static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6242 void __user *arg)
6243{
6244 struct ctlr_info *h = sdev_to_hba(dev);
6245 IOCTL32_Command_struct __user *arg32 = arg;
6246 IOCTL_Command_struct arg64;
6247 int err;
6248 u32 cp;
6249
6250 if (!arg)
6251 return -EINVAL;
6252
6253 memset(&arg64, 0, sizeof(arg64));
6254 if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
6255 return -EFAULT;
6256 if (get_user(cp, &arg32->buf))
6257 return -EFAULT;
6258 arg64.buf = compat_ptr(cp);
6259
6260 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6261 return -EAGAIN;
6262 err = hpsa_passthru_ioctl(h, &arg64);
6263 atomic_inc(&h->passthru_cmds_avail);
6264 if (err)
6265 return err;
6266 if (copy_to_user(&arg32->error_info, &arg64.error_info,
6267 sizeof(arg32->error_info)))
6268 return -EFAULT;
6269 return 0;
6270}
6271
6272static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6273 unsigned int cmd, void __user *arg)
6274{
6275 struct ctlr_info *h = sdev_to_hba(dev);
6276 BIG_IOCTL32_Command_struct __user *arg32 = arg;
6277 BIG_IOCTL_Command_struct arg64;
6278 int err;
6279 u32 cp;
6280
6281 if (!arg)
6282 return -EINVAL;
6283 memset(&arg64, 0, sizeof(arg64));
6284 if (copy_from_user(&arg64, arg32,
6285 offsetof(BIG_IOCTL32_Command_struct, buf)))
6286 return -EFAULT;
6287 if (get_user(cp, &arg32->buf))
6288 return -EFAULT;
6289 arg64.buf = compat_ptr(cp);
6290
6291 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6292 return -EAGAIN;
6293 err = hpsa_big_passthru_ioctl(h, &arg64);
6294 atomic_inc(&h->passthru_cmds_avail);
6295 if (err)
6296 return err;
6297 if (copy_to_user(&arg32->error_info, &arg64.error_info,
6298 sizeof(arg32->error_info)))
6299 return -EFAULT;
6300 return 0;
6301}
6302
6303static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6304 void __user *arg)
6305{
6306 switch (cmd) {
6307 case CCISS_GETPCIINFO:
6308 case CCISS_GETINTINFO:
6309 case CCISS_SETINTINFO:
6310 case CCISS_GETNODENAME:
6311 case CCISS_SETNODENAME:
6312 case CCISS_GETHEARTBEAT:
6313 case CCISS_GETBUSTYPES:
6314 case CCISS_GETFIRMVER:
6315 case CCISS_GETDRIVVER:
6316 case CCISS_REVALIDVOLS:
6317 case CCISS_DEREGDISK:
6318 case CCISS_REGNEWDISK:
6319 case CCISS_REGNEWD:
6320 case CCISS_RESCANDISK:
6321 case CCISS_GETLUNINFO:
6322 return hpsa_ioctl(dev, cmd, arg);
6323
6324 case CCISS_PASSTHRU32:
6325 return hpsa_ioctl32_passthru(dev, cmd, arg);
6326 case CCISS_BIG_PASSTHRU32:
6327 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6328
6329 default:
6330 return -ENOIOCTLCMD;
6331 }
6332}
6333#endif
6334
6335static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6336{
6337 struct hpsa_pci_info pciinfo;
6338
6339 if (!argp)
6340 return -EINVAL;
6341 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6342 pciinfo.bus = h->pdev->bus->number;
6343 pciinfo.dev_fn = h->pdev->devfn;
6344 pciinfo.board_id = h->board_id;
6345 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6346 return -EFAULT;
6347 return 0;
6348}
6349
6350static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6351{
6352 DriverVer_type DriverVer;
6353 unsigned char vmaj, vmin, vsubmin;
6354 int rc;
6355
6356 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6357 &vmaj, &vmin, &vsubmin);
6358 if (rc != 3) {
6359 dev_info(&h->pdev->dev, "driver version string '%s' "
6360 "unrecognized.", HPSA_DRIVER_VERSION);
6361 vmaj = 0;
6362 vmin = 0;
6363 vsubmin = 0;
6364 }
6365 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6366 if (!argp)
6367 return -EINVAL;
6368 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6369 return -EFAULT;
6370 return 0;
6371}
6372
6373static int hpsa_passthru_ioctl(struct ctlr_info *h,
6374 IOCTL_Command_struct *iocommand)
6375{
6376 struct CommandList *c;
6377 char *buff = NULL;
6378 u64 temp64;
6379 int rc = 0;
6380
6381 if (!capable(CAP_SYS_RAWIO))
6382 return -EPERM;
6383 if ((iocommand->buf_size < 1) &&
6384 (iocommand->Request.Type.Direction != XFER_NONE)) {
6385 return -EINVAL;
6386 }
6387 if (iocommand->buf_size > 0) {
6388 buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
6389 if (buff == NULL)
6390 return -ENOMEM;
6391 if (iocommand->Request.Type.Direction & XFER_WRITE) {
6392
6393 if (copy_from_user(buff, iocommand->buf,
6394 iocommand->buf_size)) {
6395 rc = -EFAULT;
6396 goto out_kfree;
6397 }
6398 } else {
6399 memset(buff, 0, iocommand->buf_size);
6400 }
6401 }
6402 c = cmd_alloc(h);
6403
6404
6405 c->cmd_type = CMD_IOCTL_PEND;
6406 c->scsi_cmd = SCSI_CMD_BUSY;
6407
6408 c->Header.ReplyQueue = 0;
6409 if (iocommand->buf_size > 0) {
6410 c->Header.SGList = 1;
6411 c->Header.SGTotal = cpu_to_le16(1);
6412 } else {
6413 c->Header.SGList = 0;
6414 c->Header.SGTotal = cpu_to_le16(0);
6415 }
6416 memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
6417
6418
6419 memcpy(&c->Request, &iocommand->Request,
6420 sizeof(c->Request));
6421
6422
6423 if (iocommand->buf_size > 0) {
6424 temp64 = dma_map_single(&h->pdev->dev, buff,
6425 iocommand->buf_size, DMA_BIDIRECTIONAL);
6426 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6427 c->SG[0].Addr = cpu_to_le64(0);
6428 c->SG[0].Len = cpu_to_le32(0);
6429 rc = -ENOMEM;
6430 goto out;
6431 }
6432 c->SG[0].Addr = cpu_to_le64(temp64);
6433 c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
6434 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
6435 }
6436 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6437 NO_TIMEOUT);
6438 if (iocommand->buf_size > 0)
6439 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6440 check_ioctl_unit_attention(h, c);
6441 if (rc) {
6442 rc = -EIO;
6443 goto out;
6444 }
6445
6446
6447 memcpy(&iocommand->error_info, c->err_info,
6448 sizeof(iocommand->error_info));
6449 if ((iocommand->Request.Type.Direction & XFER_READ) &&
6450 iocommand->buf_size > 0) {
6451
6452 if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
6453 rc = -EFAULT;
6454 goto out;
6455 }
6456 }
6457out:
6458 cmd_free(h, c);
6459out_kfree:
6460 kfree(buff);
6461 return rc;
6462}
6463
6464static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
6465 BIG_IOCTL_Command_struct *ioc)
6466{
6467 struct CommandList *c;
6468 unsigned char **buff = NULL;
6469 int *buff_size = NULL;
6470 u64 temp64;
6471 BYTE sg_used = 0;
6472 int status = 0;
6473 u32 left;
6474 u32 sz;
6475 BYTE __user *data_ptr;
6476
6477 if (!capable(CAP_SYS_RAWIO))
6478 return -EPERM;
6479
6480 if ((ioc->buf_size < 1) &&
6481 (ioc->Request.Type.Direction != XFER_NONE))
6482 return -EINVAL;
6483
6484 if (ioc->malloc_size > MAX_KMALLOC_SIZE)
6485 return -EINVAL;
6486 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
6487 return -EINVAL;
6488 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6489 if (!buff) {
6490 status = -ENOMEM;
6491 goto cleanup1;
6492 }
6493 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6494 if (!buff_size) {
6495 status = -ENOMEM;
6496 goto cleanup1;
6497 }
6498 left = ioc->buf_size;
6499 data_ptr = ioc->buf;
6500 while (left) {
6501 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6502 buff_size[sg_used] = sz;
6503 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6504 if (buff[sg_used] == NULL) {
6505 status = -ENOMEM;
6506 goto cleanup1;
6507 }
6508 if (ioc->Request.Type.Direction & XFER_WRITE) {
6509 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6510 status = -EFAULT;
6511 goto cleanup1;
6512 }
6513 } else
6514 memset(buff[sg_used], 0, sz);
6515 left -= sz;
6516 data_ptr += sz;
6517 sg_used++;
6518 }
6519 c = cmd_alloc(h);
6520
6521 c->cmd_type = CMD_IOCTL_PEND;
6522 c->scsi_cmd = SCSI_CMD_BUSY;
6523 c->Header.ReplyQueue = 0;
6524 c->Header.SGList = (u8) sg_used;
6525 c->Header.SGTotal = cpu_to_le16(sg_used);
6526 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6527 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6528 if (ioc->buf_size > 0) {
6529 int i;
6530 for (i = 0; i < sg_used; i++) {
6531 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6532 buff_size[i], DMA_BIDIRECTIONAL);
6533 if (dma_mapping_error(&h->pdev->dev,
6534 (dma_addr_t) temp64)) {
6535 c->SG[i].Addr = cpu_to_le64(0);
6536 c->SG[i].Len = cpu_to_le32(0);
6537 hpsa_pci_unmap(h->pdev, c, i,
6538 DMA_BIDIRECTIONAL);
6539 status = -ENOMEM;
6540 goto cleanup0;
6541 }
6542 c->SG[i].Addr = cpu_to_le64(temp64);
6543 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6544 c->SG[i].Ext = cpu_to_le32(0);
6545 }
6546 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6547 }
6548 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6549 NO_TIMEOUT);
6550 if (sg_used)
6551 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6552 check_ioctl_unit_attention(h, c);
6553 if (status) {
6554 status = -EIO;
6555 goto cleanup0;
6556 }
6557
6558
6559 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6560 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6561 int i;
6562
6563
6564 BYTE __user *ptr = ioc->buf;
6565 for (i = 0; i < sg_used; i++) {
6566 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6567 status = -EFAULT;
6568 goto cleanup0;
6569 }
6570 ptr += buff_size[i];
6571 }
6572 }
6573 status = 0;
6574cleanup0:
6575 cmd_free(h, c);
6576cleanup1:
6577 if (buff) {
6578 int i;
6579
6580 for (i = 0; i < sg_used; i++)
6581 kfree(buff[i]);
6582 kfree(buff);
6583 }
6584 kfree(buff_size);
6585 return status;
6586}
6587
6588static void check_ioctl_unit_attention(struct ctlr_info *h,
6589 struct CommandList *c)
6590{
6591 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6592 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6593 (void) check_for_unit_attention(h, c);
6594}
6595
6596
6597
6598
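/* Dispatcher for the CCISS-compatible ioctls issued against hpsa SCSI devices. */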
6599static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6600 void __user *argp)
6601{
6602 struct ctlr_info *h = sdev_to_hba(dev);
6603 int rc;
6604
6605 switch (cmd) {
6606 case CCISS_DEREGDISK:
6607 case CCISS_REGNEWDISK:
6608 case CCISS_REGNEWD:
6609 hpsa_scan_start(h->scsi_host);
6610 return 0;
6611 case CCISS_GETPCIINFO:
6612 return hpsa_getpciinfo_ioctl(h, argp);
6613 case CCISS_GETDRIVVER:
6614 return hpsa_getdrivver_ioctl(h, argp);
6615 case CCISS_PASSTHRU: {
6616 IOCTL_Command_struct iocommand;
6617
6618 if (!argp)
6619 return -EINVAL;
6620 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6621 return -EFAULT;
6622 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6623 return -EAGAIN;
6624 rc = hpsa_passthru_ioctl(h, &iocommand);
6625 atomic_inc(&h->passthru_cmds_avail);
6626 if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
6627 rc = -EFAULT;
6628 return rc;
6629 }
6630 case CCISS_BIG_PASSTHRU: {
6631 BIG_IOCTL_Command_struct ioc;
6632 if (!argp)
6633 return -EINVAL;
6634 if (copy_from_user(&ioc, argp, sizeof(ioc)))
6635 return -EFAULT;
6636 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6637 return -EAGAIN;
6638 rc = hpsa_big_passthru_ioctl(h, &ioc);
6639 atomic_inc(&h->passthru_cmds_avail);
6640 if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
6641 rc = -EFAULT;
6642 return rc;
6643 }
6644 default:
6645 return -ENOTTY;
6646 }
6647}
6648
6649static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6650{
6651 struct CommandList *c;
6652
6653 c = cmd_alloc(h);
6654
6655
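/* fill_cmd can't fail here: there is no data buffer to map. */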
6656 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6657 RAID_CTLR_LUNID, TYPE_MSG);
6658 c->Request.CDB[1] = reset_type;
6659 c->waiting = NULL;
6660 enqueue_cmd_and_start_io(h, c);
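/*
 * Don't wait for completion and don't free the command: the reset tears
 * the controller down and the driver reinitializes everything afterwards,
 * so nothing is leaked.
 */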
6661
6662
6663
6664
6665 return;
6666}
6667
6668static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6669 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6670 int cmd_type)
6671{
6672 enum dma_data_direction dir = DMA_NONE;
6673
6674 c->cmd_type = CMD_IOCTL_PEND;
6675 c->scsi_cmd = SCSI_CMD_BUSY;
6676 c->Header.ReplyQueue = 0;
6677 if (buff != NULL && size > 0) {
6678 c->Header.SGList = 1;
6679 c->Header.SGTotal = cpu_to_le16(1);
6680 } else {
6681 c->Header.SGList = 0;
6682 c->Header.SGTotal = cpu_to_le16(0);
6683 }
6684 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6685
6686 if (cmd_type == TYPE_CMD) {
6687 switch (cmd) {
6688 case HPSA_INQUIRY:
6689
6690 if (page_code & VPD_PAGE) {
6691 c->Request.CDB[1] = 0x01;
6692 c->Request.CDB[2] = (page_code & 0xff);
6693 }
6694 c->Request.CDBLen = 6;
6695 c->Request.type_attr_dir =
6696 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6697 c->Request.Timeout = 0;
6698 c->Request.CDB[0] = HPSA_INQUIRY;
6699 c->Request.CDB[4] = size & 0xFF;
6700 break;
6701 case RECEIVE_DIAGNOSTIC:
6702 c->Request.CDBLen = 6;
6703 c->Request.type_attr_dir =
6704 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6705 c->Request.Timeout = 0;
6706 c->Request.CDB[0] = cmd;
6707 c->Request.CDB[1] = 1;
6708 c->Request.CDB[2] = 1;
6709 c->Request.CDB[3] = (size >> 8) & 0xFF;
6710 c->Request.CDB[4] = size & 0xFF;
6711 break;
6712 case HPSA_REPORT_LOG:
6713 case HPSA_REPORT_PHYS:
6714
6715
6716
6717 c->Request.CDBLen = 12;
6718 c->Request.type_attr_dir =
6719 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6720 c->Request.Timeout = 0;
6721 c->Request.CDB[0] = cmd;
6722 c->Request.CDB[6] = (size >> 24) & 0xFF;
6723 c->Request.CDB[7] = (size >> 16) & 0xFF;
6724 c->Request.CDB[8] = (size >> 8) & 0xFF;
6725 c->Request.CDB[9] = size & 0xFF;
6726 break;
6727 case BMIC_SENSE_DIAG_OPTIONS:
6728 c->Request.CDBLen = 16;
6729 c->Request.type_attr_dir =
6730 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6731 c->Request.Timeout = 0;
6732
6733 c->Request.CDB[0] = BMIC_READ;
6734 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6735 break;
6736 case BMIC_SET_DIAG_OPTIONS:
6737 c->Request.CDBLen = 16;
6738 c->Request.type_attr_dir =
6739 TYPE_ATTR_DIR(cmd_type,
6740 ATTR_SIMPLE, XFER_WRITE);
6741 c->Request.Timeout = 0;
6742 c->Request.CDB[0] = BMIC_WRITE;
6743 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6744 break;
6745 case HPSA_CACHE_FLUSH:
6746 c->Request.CDBLen = 12;
6747 c->Request.type_attr_dir =
6748 TYPE_ATTR_DIR(cmd_type,
6749 ATTR_SIMPLE, XFER_WRITE);
6750 c->Request.Timeout = 0;
6751 c->Request.CDB[0] = BMIC_WRITE;
6752 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6753 c->Request.CDB[7] = (size >> 8) & 0xFF;
6754 c->Request.CDB[8] = size & 0xFF;
6755 break;
6756 case TEST_UNIT_READY:
6757 c->Request.CDBLen = 6;
6758 c->Request.type_attr_dir =
6759 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6760 c->Request.Timeout = 0;
6761 break;
6762 case HPSA_GET_RAID_MAP:
6763 c->Request.CDBLen = 12;
6764 c->Request.type_attr_dir =
6765 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6766 c->Request.Timeout = 0;
6767 c->Request.CDB[0] = HPSA_CISS_READ;
6768 c->Request.CDB[1] = cmd;
6769 c->Request.CDB[6] = (size >> 24) & 0xFF;
6770 c->Request.CDB[7] = (size >> 16) & 0xFF;
6771 c->Request.CDB[8] = (size >> 8) & 0xFF;
6772 c->Request.CDB[9] = size & 0xFF;
6773 break;
6774 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6775 c->Request.CDBLen = 10;
6776 c->Request.type_attr_dir =
6777 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6778 c->Request.Timeout = 0;
6779 c->Request.CDB[0] = BMIC_READ;
6780 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6781 c->Request.CDB[7] = (size >> 16) & 0xFF;
6782 c->Request.CDB[8] = (size >> 8) & 0xFF;
6783 break;
6784 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6785 c->Request.CDBLen = 10;
6786 c->Request.type_attr_dir =
6787 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6788 c->Request.Timeout = 0;
6789 c->Request.CDB[0] = BMIC_READ;
6790 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6791 c->Request.CDB[7] = (size >> 16) & 0xFF;
6792 c->Request.CDB[8] = (size >> 8) & 0xFF;
6793 break;
6794 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6795 c->Request.CDBLen = 10;
6796 c->Request.type_attr_dir =
6797 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6798 c->Request.Timeout = 0;
6799 c->Request.CDB[0] = BMIC_READ;
6800 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6801 c->Request.CDB[7] = (size >> 16) & 0xFF;
6802 c->Request.CDB[8] = (size >> 8) & 0xFF;
6803 break;
6804 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6805 c->Request.CDBLen = 10;
6806 c->Request.type_attr_dir =
6807 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6808 c->Request.Timeout = 0;
6809 c->Request.CDB[0] = BMIC_READ;
6810 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6811 c->Request.CDB[7] = (size >> 16) & 0xFF;
6812 c->Request.CDB[8] = (size >> 8) & 0xFF;
6813 break;
6814 case BMIC_IDENTIFY_CONTROLLER:
6815 c->Request.CDBLen = 10;
6816 c->Request.type_attr_dir =
6817 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6818 c->Request.Timeout = 0;
6819 c->Request.CDB[0] = BMIC_READ;
6820 c->Request.CDB[1] = 0;
6821 c->Request.CDB[2] = 0;
6822 c->Request.CDB[3] = 0;
6823 c->Request.CDB[4] = 0;
6824 c->Request.CDB[5] = 0;
6825 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6826 c->Request.CDB[7] = (size >> 16) & 0xFF;
6827 c->Request.CDB[8] = (size >> 8) & 0xFF;
6828 c->Request.CDB[9] = 0;
6829 break;
6830 default:
6831 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6832 BUG();
6833 }
6834 } else if (cmd_type == TYPE_MSG) {
6835 switch (cmd) {
6836
6837 case HPSA_PHYS_TARGET_RESET:
6838 c->Request.CDBLen = 16;
6839 c->Request.type_attr_dir =
6840 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6841 c->Request.Timeout = 0;
6842 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6843 c->Request.CDB[0] = HPSA_RESET;
6844 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6845
6846 c->Request.CDB[4] = 0x00;
6847 c->Request.CDB[5] = 0x00;
6848 c->Request.CDB[6] = 0x00;
6849 c->Request.CDB[7] = 0x00;
6850 break;
6851 case HPSA_DEVICE_RESET_MSG:
6852 c->Request.CDBLen = 16;
6853 c->Request.type_attr_dir =
6854 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6855 c->Request.Timeout = 0;
6856 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6857 c->Request.CDB[0] = cmd;
6858 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6859
6860
6861 c->Request.CDB[4] = 0x00;
6862 c->Request.CDB[5] = 0x00;
6863 c->Request.CDB[6] = 0x00;
6864 c->Request.CDB[7] = 0x00;
6865 break;
6866 default:
6867 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6868 cmd);
6869 BUG();
6870 }
6871 } else {
6872 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6873 BUG();
6874 }
6875
6876 switch (GET_DIR(c->Request.type_attr_dir)) {
6877 case XFER_READ:
6878 dir = DMA_FROM_DEVICE;
6879 break;
6880 case XFER_WRITE:
6881 dir = DMA_TO_DEVICE;
6882 break;
6883 case XFER_NONE:
6884 dir = DMA_NONE;
6885 break;
6886 default:
6887 dir = DMA_BIDIRECTIONAL;
6888 }
6889 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6890 return -1;
6891 return 0;
6892}
6893
6894
6895
6896
6897static void __iomem *remap_pci_mem(ulong base, ulong size)
6898{
6899 ulong page_base = ((ulong) base) & PAGE_MASK;
6900 ulong page_offs = ((ulong) base) - page_base;
6901 void __iomem *page_remapped = ioremap(page_base,
6902 page_offs + size);
6903
6904 return page_remapped ? (page_remapped + page_offs) : NULL;
6905}
6906
6907static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6908{
6909 return h->access.command_completed(h, q);
6910}
6911
6912static inline bool interrupt_pending(struct ctlr_info *h)
6913{
6914 return h->access.intr_pending(h);
6915}
6916
6917static inline long interrupt_not_for_us(struct ctlr_info *h)
6918{
6919 return (h->access.intr_pending(h) == 0) ||
6920 (h->interrupts_enabled == 0);
6921}
6922
6923static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6924 u32 raw_tag)
6925{
6926 if (unlikely(tag_index >= h->nr_cmds)) {
6927 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6928 return 1;
6929 }
6930 return 0;
6931}
6932
6933static inline void finish_cmd(struct CommandList *c)
6934{
6935 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6936 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6937 || c->cmd_type == CMD_IOACCEL2))
6938 complete_scsi_command(c);
6939 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6940 complete(c->waiting);
6941}
6942
6943
6944static inline void process_indexed_cmd(struct ctlr_info *h,
6945 u32 raw_tag)
6946{
6947 u32 tag_index;
6948 struct CommandList *c;
6949
6950 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6951 if (!bad_tag(h, tag_index, raw_tag)) {
6952 c = h->cmd_pool + tag_index;
6953 finish_cmd(c);
6954 }
6955}
6956
6957
6958
6959
6960
6961
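/*
 * Some controllers raise a spurious interrupt while interrupts are masked
 * during a reset_devices (kdump) boot; recognize and ignore it.
 */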
6962static int ignore_bogus_interrupt(struct ctlr_info *h)
6963{
6964 if (likely(!reset_devices))
6965 return 0;
6966
6967 if (likely(h->interrupts_enabled))
6968 return 0;
6969
6970 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6971 "(known firmware bug.) Ignoring.\n");
6972
6973 return 1;
6974}
6975
6976
6977
6978
6979
6980
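/*
 * Each interrupt handler is registered with &h->q[i] as its cookie, and
 * h->q[i] holds the value i, so subtracting the stored index from the
 * pointer yields &h->q[0] and, via container_of, the enclosing ctlr_info.
 */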
6981static struct ctlr_info *queue_to_hba(u8 *queue)
6982{
6983 return container_of((queue - *queue), struct ctlr_info, q[0]);
6984}
6985
6986static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6987{
6988 struct ctlr_info *h = queue_to_hba(queue);
6989 u8 q = *(u8 *) queue;
6990 u32 raw_tag;
6991
6992 if (ignore_bogus_interrupt(h))
6993 return IRQ_NONE;
6994
6995 if (interrupt_not_for_us(h))
6996 return IRQ_NONE;
6997 h->last_intr_timestamp = get_jiffies_64();
6998 while (interrupt_pending(h)) {
6999 raw_tag = get_next_completion(h, q);
7000 while (raw_tag != FIFO_EMPTY)
7001 raw_tag = next_command(h, q);
7002 }
7003 return IRQ_HANDLED;
7004}
7005
7006static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7007{
7008 struct ctlr_info *h = queue_to_hba(queue);
7009 u32 raw_tag;
7010 u8 q = *(u8 *) queue;
7011
7012 if (ignore_bogus_interrupt(h))
7013 return IRQ_NONE;
7014
7015 h->last_intr_timestamp = get_jiffies_64();
7016 raw_tag = get_next_completion(h, q);
7017 while (raw_tag != FIFO_EMPTY)
7018 raw_tag = next_command(h, q);
7019 return IRQ_HANDLED;
7020}
7021
7022static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7023{
7024 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7025 u32 raw_tag;
7026 u8 q = *(u8 *) queue;
7027
7028 if (interrupt_not_for_us(h))
7029 return IRQ_NONE;
7030 h->last_intr_timestamp = get_jiffies_64();
7031 while (interrupt_pending(h)) {
7032 raw_tag = get_next_completion(h, q);
7033 while (raw_tag != FIFO_EMPTY) {
7034 process_indexed_cmd(h, raw_tag);
7035 raw_tag = next_command(h, q);
7036 }
7037 }
7038 return IRQ_HANDLED;
7039}
7040
7041static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7042{
7043 struct ctlr_info *h = queue_to_hba(queue);
7044 u32 raw_tag;
7045 u8 q = *(u8 *) queue;
7046
7047 h->last_intr_timestamp = get_jiffies_64();
7048 raw_tag = get_next_completion(h, q);
7049 while (raw_tag != FIFO_EMPTY) {
7050 process_indexed_cmd(h, raw_tag);
7051 raw_tag = next_command(h, q);
7052 }
7053 return IRQ_HANDLED;
7054}
7055
7056
7057
7058
7059
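/*
 * Send a single message-type CDB (e.g. the post-reset no-op) to the
 * controller through the simple-mode inbound/outbound post registers,
 * polling for completion. Used before the regular command pool and
 * interrupts are available.
 */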
7060static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7061 unsigned char type)
7062{
7063 struct Command {
7064 struct CommandListHeader CommandHeader;
7065 struct RequestBlock Request;
7066 struct ErrDescriptor ErrorDescriptor;
7067 };
7068 struct Command *cmd;
7069 static const size_t cmd_sz = sizeof(*cmd) +
7070 sizeof(cmd->ErrorDescriptor);
7071 dma_addr_t paddr64;
7072 __le32 paddr32;
7073 u32 tag;
7074 void __iomem *vaddr;
7075 int i, err;
7076
7077 vaddr = pci_ioremap_bar(pdev, 0);
7078 if (vaddr == NULL)
7079 return -ENOMEM;
7080
7081
7082
7083
7084
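/*
 * The command block is posted through a 32-bit register, so it must be
 * allocated from coherent memory that lies below 4 GiB.
 */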
7085 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7086 if (err) {
7087 iounmap(vaddr);
7088 return err;
7089 }
7090
7091 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7092 if (cmd == NULL) {
7093 iounmap(vaddr);
7094 return -ENOMEM;
7095 }
7096
7097
7098
7099
7100
7101 paddr32 = cpu_to_le32(paddr64);
7102
7103 cmd->CommandHeader.ReplyQueue = 0;
7104 cmd->CommandHeader.SGList = 0;
7105 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7106 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7107 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7108
7109 cmd->Request.CDBLen = 16;
7110 cmd->Request.type_attr_dir =
7111 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7112 cmd->Request.Timeout = 0;
7113 cmd->Request.CDB[0] = opcode;
7114 cmd->Request.CDB[1] = type;
7115 memset(&cmd->Request.CDB[2], 0, 14);
7116 cmd->ErrorDescriptor.Addr =
7117 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7118 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7119
7120 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7121
7122 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7123 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7124 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7125 break;
7126 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7127 }
7128
7129 iounmap(vaddr);
7130
7131
7132
7133
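/*
 * If the controller never replied, deliberately leak the DMA buffer: the
 * hardware could still write to it later, so freeing it would be unsafe.
 */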
7134 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7135 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7136 opcode, type);
7137 return -ETIMEDOUT;
7138 }
7139
7140 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7141
7142 if (tag & HPSA_ERROR_BIT) {
7143 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7144 opcode, type);
7145 return -EIO;
7146 }
7147
7148 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7149 opcode, type);
7150 return 0;
7151}
7152
7153#define hpsa_noop(p) hpsa_message(p, 3, 0)
7154
7155static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7156 void __iomem *vaddr, u32 use_doorbell)
7157{
7158
7159 if (use_doorbell) {
7160
7161
7162
7163
7164 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7165 writel(use_doorbell, vaddr + SA5_DOORBELL);
7166
7167
7168
7169
7170
7171
7172 msleep(10000);
7173 } else {
7174
7175
7176
7177
7178
7179
7180
7181
7182
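/*
 * No doorbell reset support: cycle the controller through the PCI power
 * states D3hot and back to D0, which these boards treat as a reset.
 */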
7183 int rc = 0;
7184
7185 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7186
7187
7188 rc = pci_set_power_state(pdev, PCI_D3hot);
7189 if (rc)
7190 return rc;
7191
7192 msleep(500);
7193
7194
7195 rc = pci_set_power_state(pdev, PCI_D0);
7196 if (rc)
7197 return rc;
7198
7199
7200
7201
7202
7203
7204 msleep(500);
7205 }
7206 return 0;
7207}
7208
7209static void init_driver_version(char *driver_version, int len)
7210{
7211 memset(driver_version, 0, len);
7212 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7213}
7214
7215static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7216{
7217 char *driver_version;
7218 int i, size = sizeof(cfgtable->driver_version);
7219
7220 driver_version = kmalloc(size, GFP_KERNEL);
7221 if (!driver_version)
7222 return -ENOMEM;
7223
7224 init_driver_version(driver_version, size);
7225 for (i = 0; i < size; i++)
7226 writeb(driver_version[i], &cfgtable->driver_version[i]);
7227 kfree(driver_version);
7228 return 0;
7229}
7230
7231static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7232 unsigned char *driver_ver)
7233{
7234 int i;
7235
7236 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7237 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7238}
7239
7240static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7241{
7242
7243 char *driver_ver, *old_driver_ver;
7244 int rc, size = sizeof(cfgtable->driver_version);
7245
7246 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7247 if (!old_driver_ver)
7248 return -ENOMEM;
7249 driver_ver = old_driver_ver + size;
7250
7251
7252
7253
7254 init_driver_version(old_driver_ver, size);
7255 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7256 rc = !memcmp(driver_ver, old_driver_ver, size);
7257 kfree(old_driver_ver);
7258 return rc;
7259}
7260
7261
7262
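/*
 * Hard-reset the controller from the kdump kernel: remap the config table,
 * use the doorbell reset when the firmware advertises it, otherwise fall
 * back to a PCI power-management reset, then wait for the board to come
 * back ready.
 */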
7263static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7264{
7265 u64 cfg_offset;
7266 u32 cfg_base_addr;
7267 u64 cfg_base_addr_index;
7268 void __iomem *vaddr;
7269 unsigned long paddr;
7270 u32 misc_fw_support;
7271 int rc;
7272 struct CfgTable __iomem *cfgtable;
7273 u32 use_doorbell;
7274 u16 command_register;
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287
7288
7289 if (!ctlr_is_resettable(board_id)) {
7290 dev_warn(&pdev->dev, "Controller not resettable\n");
7291 return -ENODEV;
7292 }
7293
7294
7295 if (!ctlr_is_hard_resettable(board_id))
7296 return -ENOTSUPP;
7297
7298
7299 pci_read_config_word(pdev, 4, &command_register);
7300 pci_save_state(pdev);
7301
7302
7303 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7304 if (rc)
7305 return rc;
7306 vaddr = remap_pci_mem(paddr, 0x250);
7307 if (!vaddr)
7308 return -ENOMEM;
7309
7310
7311 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7312 &cfg_base_addr_index, &cfg_offset);
7313 if (rc)
7314 goto unmap_vaddr;
7315 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7316 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7317 if (!cfgtable) {
7318 rc = -ENOMEM;
7319 goto unmap_vaddr;
7320 }
7321 rc = write_driver_ver_to_cfgtable(cfgtable);
7322 if (rc)
7323 goto unmap_cfgtable;
7324
7325
7326
7327
7328 misc_fw_support = readl(&cfgtable->misc_fw_support);
7329 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7330 if (use_doorbell) {
7331 use_doorbell = DOORBELL_CTLR_RESET2;
7332 } else {
7333 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7334 if (use_doorbell) {
7335 dev_warn(&pdev->dev,
7336 "Soft reset not supported. Firmware update is required.\n");
7337 rc = -ENOTSUPP;
7338 goto unmap_cfgtable;
7339 }
7340 }
7341
7342 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7343 if (rc)
7344 goto unmap_cfgtable;
7345
7346 pci_restore_state(pdev);
7347 pci_write_config_word(pdev, 4, command_register);
7348
7349
7350
7351 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7352
7353 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7354 if (rc) {
7355 dev_warn(&pdev->dev,
7356 "Failed waiting for board to become ready after hard reset\n");
7357 goto unmap_cfgtable;
7358 }
7359
7360 rc = controller_reset_failed(vaddr);
7361 if (rc < 0)
7362 goto unmap_cfgtable;
7363 if (rc) {
7364 dev_warn(&pdev->dev, "Unable to successfully reset "
7365 "controller. Will try soft reset.\n");
7366 rc = -ENOTSUPP;
7367 } else {
7368 dev_info(&pdev->dev, "board ready after hard reset.\n");
7369 }
7370
7371unmap_cfgtable:
7372 iounmap(cfgtable);
7373
7374unmap_vaddr:
7375 iounmap(vaddr);
7376 return rc;
7377}
7378
7379
7380
7381
7382
7383
7384static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7385{
7386#ifdef HPSA_DEBUG
7387 int i;
7388 char temp_name[17];
7389
7390 dev_info(dev, "Controller Configuration information\n");
7391 dev_info(dev, "------------------------------------\n");
7392 for (i = 0; i < 4; i++)
7393 temp_name[i] = readb(&(tb->Signature[i]));
7394 temp_name[4] = '\0';
7395 dev_info(dev, " Signature = %s\n", temp_name);
7396 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7397 dev_info(dev, " Transport methods supported = 0x%x\n",
7398 readl(&(tb->TransportSupport)));
7399 dev_info(dev, " Transport methods active = 0x%x\n",
7400 readl(&(tb->TransportActive)));
7401 dev_info(dev, " Requested transport Method = 0x%x\n",
7402 readl(&(tb->HostWrite.TransportRequest)));
7403 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7404 readl(&(tb->HostWrite.CoalIntDelay)));
7405 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7406 readl(&(tb->HostWrite.CoalIntCount)));
7407 dev_info(dev, " Max outstanding commands = %d\n",
7408 readl(&(tb->CmdsOutMax)));
7409 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7410 for (i = 0; i < 16; i++)
7411 temp_name[i] = readb(&(tb->ServerName[i]));
7412 temp_name[16] = '\0';
7413 dev_info(dev, " Server Name = %s\n", temp_name);
7414 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7415 readl(&(tb->HeartBeat)));
7416#endif
7417}
7418
7419static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7420{
7421 int i, offset, mem_type, bar_type;
7422
7423 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
7424 return 0;
7425 offset = 0;
7426 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7427 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7428 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7429 offset += 4;
7430 else {
7431 mem_type = pci_resource_flags(pdev, i) &
7432 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7433 switch (mem_type) {
7434 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7435 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7436 offset += 4;
7437 break;
7438 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7439 offset += 8;
7440 break;
7441 default:
7442 dev_warn(&pdev->dev,
7443 "base address is invalid\n");
7444 return -1;
7445 break;
7446 }
7447 }
7448 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7449 return i + 1;
7450 }
7451 return -1;
7452}
7453
7454static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7455{
7456 pci_free_irq_vectors(h->pdev);
7457 h->msix_vectors = 0;
7458}
7459
7460static void hpsa_setup_reply_map(struct ctlr_info *h)
7461{
7462 const struct cpumask *mask;
7463 unsigned int queue, cpu;
7464
7465 for (queue = 0; queue < h->msix_vectors; queue++) {
7466 mask = pci_irq_get_affinity(h->pdev, queue);
7467 if (!mask)
7468 goto fallback;
7469
7470 for_each_cpu(cpu, mask)
7471 h->reply_map[cpu] = queue;
7472 }
7473 return;
7474
7475fallback:
7476 for_each_possible_cpu(cpu)
7477 h->reply_map[cpu] = 0;
7478}
7479
7480
7481
7482
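/*
 * Pick an interrupt mode: MSI-X with one vector per reply queue when
 * possible, otherwise a single MSI or legacy INTx vector. The board IDs
 * listed below never use MSI.
 */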
7483static int hpsa_interrupt_mode(struct ctlr_info *h)
7484{
7485 unsigned int flags = PCI_IRQ_LEGACY;
7486 int ret;
7487
7488
7489 switch (h->board_id) {
7490 case 0x40700E11:
7491 case 0x40800E11:
7492 case 0x40820E11:
7493 case 0x40830E11:
7494 break;
7495 default:
7496 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7497 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7498 if (ret > 0) {
7499 h->msix_vectors = ret;
7500 return 0;
7501 }
7502
7503 flags |= PCI_IRQ_MSI;
7504 break;
7505 }
7506
7507 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7508 if (ret < 0)
7509 return ret;
7510 return 0;
7511}
7512
7513static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7514 bool *legacy_board)
7515{
7516 int i;
7517 u32 subsystem_vendor_id, subsystem_device_id;
7518
7519 subsystem_vendor_id = pdev->subsystem_vendor;
7520 subsystem_device_id = pdev->subsystem_device;
7521 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7522 subsystem_vendor_id;
7523
7524 if (legacy_board)
7525 *legacy_board = false;
7526 for (i = 0; i < ARRAY_SIZE(products); i++)
7527 if (*board_id == products[i].board_id) {
7528 if (products[i].access != &SA5A_access &&
7529 products[i].access != &SA5B_access)
7530 return i;
7531 dev_warn(&pdev->dev,
7532 "legacy board ID: 0x%08x\n",
7533 *board_id);
7534 if (legacy_board)
7535 *legacy_board = true;
7536 return i;
7537 }
7538
7539 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7540 if (legacy_board)
7541 *legacy_board = true;
7542 return ARRAY_SIZE(products) - 1;
7543}
7544
7545static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7546 unsigned long *memory_bar)
7547{
7548 int i;
7549
7550 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7551 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7552
7553 *memory_bar = pci_resource_start(pdev, i);
7554 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7555 *memory_bar);
7556 return 0;
7557 }
7558 dev_warn(&pdev->dev, "no memory BAR found\n");
7559 return -ENODEV;
7560}
7561
7562static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7563 int wait_for_ready)
7564{
7565 int i, iterations;
7566 u32 scratchpad;
7567 if (wait_for_ready)
7568 iterations = HPSA_BOARD_READY_ITERATIONS;
7569 else
7570 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7571
7572 for (i = 0; i < iterations; i++) {
7573 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7574 if (wait_for_ready) {
7575 if (scratchpad == HPSA_FIRMWARE_READY)
7576 return 0;
7577 } else {
7578 if (scratchpad != HPSA_FIRMWARE_READY)
7579 return 0;
7580 }
7581 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7582 }
7583 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7584 return -ENODEV;
7585}
7586
7587static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7588 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7589 u64 *cfg_offset)
7590{
7591 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7592 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7593 *cfg_base_addr &= (u32) 0x0000ffff;
7594 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7595 if (*cfg_base_addr_index == -1) {
7596 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7597 return -ENODEV;
7598 }
7599 return 0;
7600}
7601
7602static void hpsa_free_cfgtables(struct ctlr_info *h)
7603{
7604 if (h->transtable) {
7605 iounmap(h->transtable);
7606 h->transtable = NULL;
7607 }
7608 if (h->cfgtable) {
7609 iounmap(h->cfgtable);
7610 h->cfgtable = NULL;
7611 }
7612}
7613
7614
7615
7616
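/* Locate and ioremap the controller's configuration table and transfer table. */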
7617static int hpsa_find_cfgtables(struct ctlr_info *h)
7618{
7619 u64 cfg_offset;
7620 u32 cfg_base_addr;
7621 u64 cfg_base_addr_index;
7622 u32 trans_offset;
7623 int rc;
7624
7625 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7626 &cfg_base_addr_index, &cfg_offset);
7627 if (rc)
7628 return rc;
7629 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7630 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7631 if (!h->cfgtable) {
7632 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7633 return -ENOMEM;
7634 }
7635 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7636 if (rc)
7637 return rc;
7638
7639 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7640 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7641 cfg_base_addr_index)+cfg_offset+trans_offset,
7642 sizeof(*h->transtable));
7643 if (!h->transtable) {
7644 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7645 hpsa_free_cfgtables(h);
7646 return -ENOMEM;
7647 }
7648 return 0;
7649}
7650
7651static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7652{
7653#define MIN_MAX_COMMANDS 16
7654 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7655
7656 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7657
7658
7659 if (reset_devices && h->max_commands > 32)
7660 h->max_commands = 32;
7661
7662 if (h->max_commands < MIN_MAX_COMMANDS) {
7663 dev_warn(&h->pdev->dev,
7664 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7665 h->max_commands,
7666 MIN_MAX_COMMANDS);
7667 h->max_commands = MIN_MAX_COMMANDS;
7668 }
7669}
7670
7671
7672
7673
7674
7675static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7676{
7677 return h->maxsgentries > 512;
7678}
7679
7680
7681
7682
7683
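/*
 * Read the command and scatter-gather limits from the config table and
 * derive the driver's per-command limits.
 */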
7684static void hpsa_find_board_params(struct ctlr_info *h)
7685{
7686 hpsa_get_max_perf_mode_cmds(h);
7687 h->nr_cmds = h->max_commands;
7688 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7689 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7690 if (hpsa_supports_chained_sg_blocks(h)) {
7691
7692 h->max_cmd_sg_entries = 32;
7693 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7694 h->maxsgentries--;
7695 } else {
7696
7697
7698
7699
7700
7701 h->max_cmd_sg_entries = 31;
7702 h->maxsgentries = 31;
7703 h->chainsize = 0;
7704 }
7705
7706
7707 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7708 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7709 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7710 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7711 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7712 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7713 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7714}
7715
7716static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7717{
7718 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7719 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7720 return false;
7721 }
7722 return true;
7723}
7724
7725static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7726{
7727 u32 driver_support;
7728
7729 driver_support = readl(&(h->cfgtable->driver_support));
7730
7731#ifdef CONFIG_X86
7732 driver_support |= ENABLE_SCSI_PREFETCH;
7733#endif
7734 driver_support |= ENABLE_UNIT_ATTN;
7735 writel(driver_support, &(h->cfgtable->driver_support));
7736}
7737
7738
7739
7740
7741static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7742{
7743 u32 dma_prefetch;
7744
7745 if (h->board_id != 0x3225103C)
7746 return;
7747 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7748 dma_prefetch |= 0x8000;
7749 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7750}
7751
7752static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7753{
7754 int i;
7755 u32 doorbell_value;
7756 unsigned long flags;
7757
7758 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7759 spin_lock_irqsave(&h->lock, flags);
7760 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7761 spin_unlock_irqrestore(&h->lock, flags);
7762 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7763 goto done;
7764
7765 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7766 }
7767 return -ENODEV;
7768done:
7769 return 0;
7770}
7771
7772static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7773{
7774 int i;
7775 u32 doorbell_value;
7776 unsigned long flags;
7777
7778
7779
7780
7781
7782 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7783 if (h->remove_in_progress)
7784 goto done;
7785 spin_lock_irqsave(&h->lock, flags);
7786 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7787 spin_unlock_irqrestore(&h->lock, flags);
7788 if (!(doorbell_value & CFGTBL_ChangeReq))
7789 goto done;
7790
7791 msleep(MODE_CHANGE_WAIT_INTERVAL);
7792 }
7793 return -ENODEV;
7794done:
7795 return 0;
7796}
7797
7798
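/*
 * Request the simple transport method via the config table and wait for
 * the controller to acknowledge the change.
 */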
7799static int hpsa_enter_simple_mode(struct ctlr_info *h)
7800{
7801 u32 trans_support;
7802
7803 trans_support = readl(&(h->cfgtable->TransportSupport));
7804 if (!(trans_support & SIMPLE_MODE))
7805 return -ENOTSUPP;
7806
7807 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7808
7809
7810 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7811 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7812 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7813 if (hpsa_wait_for_mode_change_ack(h))
7814 goto error;
7815 print_cfg_table(&h->pdev->dev, h->cfgtable);
7816 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7817 goto error;
7818 h->transMethod = CFGTBL_Trans_Simple;
7819 return 0;
7820error:
7821 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7822 return -ENODEV;
7823}
7824
7825
7826static void hpsa_free_pci_init(struct ctlr_info *h)
7827{
7828 hpsa_free_cfgtables(h);
7829 iounmap(h->vaddr);
7830 h->vaddr = NULL;
7831 hpsa_disable_interrupt_mode(h);
7832
7833
7834
7835
7836 pci_disable_device(h->pdev);
7837 pci_release_regions(h->pdev);
7838}
7839
7840
7841static int hpsa_pci_init(struct ctlr_info *h)
7842{
7843 int prod_index, err;
7844 bool legacy_board;
7845
7846 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7847 if (prod_index < 0)
7848 return prod_index;
7849 h->product_name = products[prod_index].product_name;
7850 h->access = *(products[prod_index].access);
7851 h->legacy_board = legacy_board;
7852 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7853 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7854
7855 err = pci_enable_device(h->pdev);
7856 if (err) {
7857 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7858 pci_disable_device(h->pdev);
7859 return err;
7860 }
7861
7862 err = pci_request_regions(h->pdev, HPSA);
7863 if (err) {
7864 dev_err(&h->pdev->dev,
7865 "failed to obtain PCI resources\n");
7866 pci_disable_device(h->pdev);
7867 return err;
7868 }
7869
7870 pci_set_master(h->pdev);
7871
7872 err = hpsa_interrupt_mode(h);
7873 if (err)
7874 goto clean1;
7875
7876
7877 hpsa_setup_reply_map(h);
7878
7879 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7880 if (err)
7881 goto clean2;
7882 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7883 if (!h->vaddr) {
7884 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7885 err = -ENOMEM;
7886 goto clean2;
7887 }
7888 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7889 if (err)
7890 goto clean3;
7891 err = hpsa_find_cfgtables(h);
7892 if (err)
7893 goto clean3;
7894 hpsa_find_board_params(h);
7895
7896 if (!hpsa_CISS_signature_present(h)) {
7897 err = -ENODEV;
7898 goto clean4;
7899 }
7900 hpsa_set_driver_support_bits(h);
7901 hpsa_p600_dma_prefetch_quirk(h);
7902 err = hpsa_enter_simple_mode(h);
7903 if (err)
7904 goto clean4;
7905 return 0;
7906
7907clean4:
7908 hpsa_free_cfgtables(h);
7909clean3:
7910 iounmap(h->vaddr);
7911 h->vaddr = NULL;
7912clean2:
7913 hpsa_disable_interrupt_mode(h);
7914clean1:
7915
7916
7917
7918
7919 pci_disable_device(h->pdev);
7920 pci_release_regions(h->pdev);
7921 return err;
7922}
7923
7924static void hpsa_hba_inquiry(struct ctlr_info *h)
7925{
7926 int rc;
7927
7928#define HBA_INQUIRY_BYTE_COUNT 64
7929 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7930 if (!h->hba_inquiry_data)
7931 return;
7932 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7933 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7934 if (rc != 0) {
7935 kfree(h->hba_inquiry_data);
7936 h->hba_inquiry_data = NULL;
7937 }
7938}
7939
7940static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7941{
7942 int rc, i;
7943 void __iomem *vaddr;
7944
7945 if (!reset_devices)
7946 return 0;
7947
7948
7949
7950
7951
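/*
 * In a kdump kernel the PCI device may be in an unknown state, so enable
 * it, disable it, wait a while, and enable it again before touching the
 * board.
 */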
7952 rc = pci_enable_device(pdev);
7953 if (rc) {
7954 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7955 return -ENODEV;
7956 }
7957 pci_disable_device(pdev);
7958 msleep(260);
7959 rc = pci_enable_device(pdev);
7960 if (rc) {
7961 dev_warn(&pdev->dev, "failed to enable device.\n");
7962 return -ENODEV;
7963 }
7964
7965 pci_set_master(pdev);
7966
7967 vaddr = pci_ioremap_bar(pdev, 0);
7968 if (vaddr == NULL) {
7969 rc = -ENOMEM;
7970 goto out_disable;
7971 }
7972 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7973 iounmap(vaddr);
7974
7975
7976 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7977
7978
7979
7980
7981
7982
7983 if (rc)
7984 goto out_disable;
7985
7986
7987 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7988 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7989 if (hpsa_noop(pdev) == 0)
7990 break;
7991 else
7992 dev_warn(&pdev->dev, "no-op failed%s\n",
7993 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
7994 }
7995
7996out_disable:
7997
7998 pci_disable_device(pdev);
7999 return rc;
8000}
8001
8002static void hpsa_free_cmd_pool(struct ctlr_info *h)
8003{
8004 kfree(h->cmd_pool_bits);
8005 h->cmd_pool_bits = NULL;
8006 if (h->cmd_pool) {
8007 dma_free_coherent(&h->pdev->dev,
8008 h->nr_cmds * sizeof(struct CommandList),
8009 h->cmd_pool,
8010 h->cmd_pool_dhandle);
8011 h->cmd_pool = NULL;
8012 h->cmd_pool_dhandle = 0;
8013 }
8014 if (h->errinfo_pool) {
8015 dma_free_coherent(&h->pdev->dev,
8016 h->nr_cmds * sizeof(struct ErrorInfo),
8017 h->errinfo_pool,
8018 h->errinfo_pool_dhandle);
8019 h->errinfo_pool = NULL;
8020 h->errinfo_pool_dhandle = 0;
8021 }
8022}
8023
8024static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8025{
8026 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8027 sizeof(unsigned long),
8028 GFP_KERNEL);
8029 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8030 h->nr_cmds * sizeof(*h->cmd_pool),
8031 &h->cmd_pool_dhandle, GFP_KERNEL);
8032 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8033 h->nr_cmds * sizeof(*h->errinfo_pool),
8034 &h->errinfo_pool_dhandle, GFP_KERNEL);
8035 if ((h->cmd_pool_bits == NULL)
8036 || (h->cmd_pool == NULL)
8037 || (h->errinfo_pool == NULL)) {
8038 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8039 goto clean_up;
8040 }
8041 hpsa_preinitialize_commands(h);
8042 return 0;
8043clean_up:
8044 hpsa_free_cmd_pool(h);
8045 return -ENOMEM;
8046}
8047
8048
8049static void hpsa_free_irqs(struct ctlr_info *h)
8050{
8051 int i;
8052 int irq_vector = 0;
8053
8054 if (hpsa_simple_mode)
8055 irq_vector = h->intr_mode;
8056
8057 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8058
8059 free_irq(pci_irq_vector(h->pdev, irq_vector),
8060 &h->q[h->intr_mode]);
8061 h->q[h->intr_mode] = 0;
8062 return;
8063 }
8064
8065 for (i = 0; i < h->msix_vectors; i++) {
8066 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8067 h->q[i] = 0;
8068 }
8069 for (; i < MAX_REPLY_QUEUES; i++)
8070 h->q[i] = 0;
8071}
8072
8073
8074static int hpsa_request_irqs(struct ctlr_info *h,
8075 irqreturn_t (*msixhandler)(int, void *),
8076 irqreturn_t (*intxhandler)(int, void *))
8077{
8078 int rc, i;
8079 int irq_vector = 0;
8080
8081 if (hpsa_simple_mode)
8082 irq_vector = h->intr_mode;
8083
8084
8085
8086
8087
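/*
 * Initialize h->q[i] = i so each handler can recover its reply queue index
 * (and the hba, see queue_to_hba()) from the cookie it is passed.
 */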
8088 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8089 h->q[i] = (u8) i;
8090
8091 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8092
8093 for (i = 0; i < h->msix_vectors; i++) {
8094 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8095 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8096 0, h->intrname[i],
8097 &h->q[i]);
8098 if (rc) {
8099 int j;
8100
8101 dev_err(&h->pdev->dev,
8102 "failed to get irq %d for %s\n",
8103 pci_irq_vector(h->pdev, i), h->devname);
8104 for (j = 0; j < i; j++) {
8105 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8106 h->q[j] = 0;
8107 }
8108 for (; j < MAX_REPLY_QUEUES; j++)
8109 h->q[j] = 0;
8110 return rc;
8111 }
8112 }
8113 } else {
8114
8115 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8116 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8117 h->msix_vectors ? "x" : "");
8118 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8119 msixhandler, 0,
8120 h->intrname[0],
8121 &h->q[h->intr_mode]);
8122 } else {
8123 sprintf(h->intrname[h->intr_mode],
8124 "%s-intx", h->devname);
8125 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8126 intxhandler, IRQF_SHARED,
8127 h->intrname[0],
8128 &h->q[h->intr_mode]);
8129 }
8130 }
8131 if (rc) {
8132 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8133 pci_irq_vector(h->pdev, irq_vector), h->devname);
8134 hpsa_free_irqs(h);
8135 return -ENODEV;
8136 }
8137 return 0;
8138}
8139
8140static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8141{
8142 int rc;
8143 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8144
8145 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8146 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8147 if (rc) {
8148 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8149 return rc;
8150 }
8151
8152 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8153 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8154 if (rc) {
8155 dev_warn(&h->pdev->dev, "Board failed to become ready "
8156 "after soft reset.\n");
8157 return rc;
8158 }
8159
8160 return 0;
8161}
8162
8163static void hpsa_free_reply_queues(struct ctlr_info *h)
8164{
8165 int i;
8166
8167 for (i = 0; i < h->nreply_queues; i++) {
8168 if (!h->reply_queue[i].head)
8169 continue;
8170 dma_free_coherent(&h->pdev->dev,
8171 h->reply_queue_size,
8172 h->reply_queue[i].head,
8173 h->reply_queue[i].busaddr);
8174 h->reply_queue[i].head = NULL;
8175 h->reply_queue[i].busaddr = 0;
8176 }
8177 h->reply_queue_size = 0;
8178}
8179
8180static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8181{
8182 hpsa_free_performant_mode(h);
8183 hpsa_free_sg_chain_blocks(h);
8184 hpsa_free_cmd_pool(h);
8185 hpsa_free_irqs(h);
8186 scsi_host_put(h->scsi_host);
8187 h->scsi_host = NULL;
8188 hpsa_free_pci_init(h);
8189 free_percpu(h->lockup_detected);
8190 h->lockup_detected = NULL;
8191 if (h->resubmit_wq) {
8192 destroy_workqueue(h->resubmit_wq);
8193 h->resubmit_wq = NULL;
8194 }
8195 if (h->rescan_ctlr_wq) {
8196 destroy_workqueue(h->rescan_ctlr_wq);
8197 h->rescan_ctlr_wq = NULL;
8198 }
8199 if (h->monitor_ctlr_wq) {
8200 destroy_workqueue(h->monitor_ctlr_wq);
8201 h->monitor_ctlr_wq = NULL;
8202 }
8203
8204 kfree(h);
8205}
8206
8207
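/*
 * Called after a lockup is detected: complete every command still in
 * flight with CMD_CTLR_LOCKUP status.
 */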
8208static void fail_all_outstanding_cmds(struct ctlr_info *h)
8209{
8210 int i, refcount;
8211 struct CommandList *c;
8212 int failcount = 0;
8213
8214 flush_workqueue(h->resubmit_wq);
8215 for (i = 0; i < h->nr_cmds; i++) {
8216 c = h->cmd_pool + i;
8217 refcount = atomic_inc_return(&c->refcount);
8218 if (refcount > 1) {
8219 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8220 finish_cmd(c);
8221 atomic_dec(&h->commands_outstanding);
8222 failcount++;
8223 }
8224 cmd_free(h, c);
8225 }
8226 dev_warn(&h->pdev->dev,
8227 "failed %d commands in fail_all\n", failcount);
8228}
8229
8230static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8231{
8232 int cpu;
8233
8234 for_each_online_cpu(cpu) {
8235 u32 *lockup_detected;
8236 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8237 *lockup_detected = value;
8238 }
8239 wmb();
8240}
8241
8242static void controller_lockup_detected(struct ctlr_info *h)
8243{
8244 unsigned long flags;
8245 u32 lockup_detected;
8246
8247 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8248 spin_lock_irqsave(&h->lock, flags);
8249 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8250 if (!lockup_detected) {
8251
8252 dev_warn(&h->pdev->dev,
8253 "lockup detected after %d but scratchpad register is zero\n",
8254 h->heartbeat_sample_interval / HZ);
8255 lockup_detected = 0xffffffff;
8256 }
8257 set_lockup_detected_for_all_cpus(h, lockup_detected);
8258 spin_unlock_irqrestore(&h->lock, flags);
8259 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8260 lockup_detected, h->heartbeat_sample_interval / HZ);
8261 if (lockup_detected == 0xffff0000) {
8262 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8263 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8264 }
8265 pci_disable_device(h->pdev);
8266 fail_all_outstanding_cmds(h);
8267}
8268
8269static int detect_controller_lockup(struct ctlr_info *h)
8270{
8271 u64 now;
8272 u32 heartbeat;
8273 unsigned long flags;
8274
8275 now = get_jiffies_64();
8276
8277 if (time_after64(h->last_intr_timestamp +
8278 (h->heartbeat_sample_interval), now))
8279 return false;
8280
8281
8282
8283
8284
8285
8286 if (time_after64(h->last_heartbeat_timestamp +
8287 (h->heartbeat_sample_interval), now))
8288 return false;
8289
8290
8291 spin_lock_irqsave(&h->lock, flags);
8292 heartbeat = readl(&h->cfgtable->HeartBeat);
8293 spin_unlock_irqrestore(&h->lock, flags);
8294 if (h->last_heartbeat == heartbeat) {
8295 controller_lockup_detected(h);
8296 return true;
8297 }
8298
8299
8300 h->last_heartbeat = heartbeat;
8301 h->last_heartbeat_timestamp = now;
8302 return false;
8303}
8304
8305
8306
8307
8308
8309
8310
8311
8312
8313
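/*
 * Re-read the ioaccel (HP SSD Smart Path) status VPD page for each device
 * and turn ioaccel off for any volume on which offload is no longer
 * enabled.
 */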
8314static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8315{
8316 int rc;
8317 int i;
8318 u8 ioaccel_status;
8319 unsigned char *buf;
8320 struct hpsa_scsi_dev_t *device;
8321
8322 if (!h)
8323 return;
8324
8325 buf = kmalloc(64, GFP_KERNEL);
8326 if (!buf)
8327 return;
8328
8329
8330
8331
8332 for (i = 0; i < h->ndevices; i++) {
8333 int offload_to_be_enabled = 0;
8334 int offload_config = 0;
8335
8336 device = h->dev[i];
8337
8338 if (!device)
8339 continue;
8340 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8341 HPSA_VPD_LV_IOACCEL_STATUS))
8342 continue;
8343
8344 memset(buf, 0, 64);
8345
8346 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8347 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8348 buf, 64);
8349 if (rc != 0)
8350 continue;
8351
8352 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8353
8354
8355
8356
8357 offload_config =
8358 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8359
8360
8361
8362
8363 if (offload_config)
8364 offload_to_be_enabled =
8365 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8366
8367
8368
8369
8370
8371
8372 if (offload_to_be_enabled)
8373 continue;
8374
8375
8376
8377
8378
8379
8380
8381 hpsa_turn_off_ioaccel_for_device(device);
8382 }
8383
8384 kfree(buf);
8385}
8386
8387static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8388{
8389 char *event_type;
8390
8391 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8392 return;
8393
8394
8395 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8396 | CFGTBL_Trans_io_accel2)) &&
8397 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8398 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8399
8400 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8401 event_type = "state change";
8402 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8403 event_type = "configuration change";
8404
8405 scsi_block_requests(h->scsi_host);
8406 hpsa_set_ioaccel_status(h);
8407 hpsa_drain_accel_commands(h);
8408
8409 dev_warn(&h->pdev->dev,
8410 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8411 h->events, event_type);
8412 writel(h->events, &(h->cfgtable->clear_event_notify));
8413
8414 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8415
8416 hpsa_wait_for_clear_event_notify_ack(h);
8417 scsi_unblock_requests(h->scsi_host);
8418 } else {
8419
8420 writel(h->events, &(h->cfgtable->clear_event_notify));
8421 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8422 hpsa_wait_for_clear_event_notify_ack(h);
8423 }
8424 return;
8425}
8426
8427
8428
8429
8430
8431
8432static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8433{
8434 if (h->drv_req_rescan) {
8435 h->drv_req_rescan = 0;
8436 return 1;
8437 }
8438
8439 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8440 return 0;
8441
8442 h->events = readl(&(h->cfgtable->event_notify));
8443 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8444}
8445
8446
8447
8448
8449static int hpsa_offline_devices_ready(struct ctlr_info *h)
8450{
8451 unsigned long flags;
8452 struct offline_device_entry *d;
8453 struct list_head *this, *tmp;
8454
8455 spin_lock_irqsave(&h->offline_device_lock, flags);
8456 list_for_each_safe(this, tmp, &h->offline_device_list) {
8457 d = list_entry(this, struct offline_device_entry,
8458 offline_list);
8459 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8460 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8461 spin_lock_irqsave(&h->offline_device_lock, flags);
8462 list_del(&d->offline_list);
8463 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8464 return 1;
8465 }
8466 spin_lock_irqsave(&h->offline_device_lock, flags);
8467 }
8468 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8469 return 0;
8470}
8471
8472static int hpsa_luns_changed(struct ctlr_info *h)
8473{
8474 int rc = 1;
8475 struct ReportLUNdata *logdev = NULL;
8476
8477
8478
8479
8480
8481 if (!h->lastlogicals)
8482 return rc;
8483
8484 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8485 if (!logdev)
8486 return rc;
8487
8488 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8489 dev_warn(&h->pdev->dev,
8490 "report luns failed, can't track lun changes.\n");
8491 goto out;
8492 }
8493 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8494 dev_info(&h->pdev->dev,
8495 "Lun changes detected.\n");
8496 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8497 goto out;
8498 } else
8499 rc = 0;
8500out:
8501 kfree(logdev);
8502 return rc;
8503}
8504
8505static void hpsa_perform_rescan(struct ctlr_info *h)
8506{
8507 struct Scsi_Host *sh = NULL;
8508 unsigned long flags;
8509
8510
8511
8512
8513 spin_lock_irqsave(&h->reset_lock, flags);
8514 if (h->reset_in_progress) {
8515 h->drv_req_rescan = 1;
8516 spin_unlock_irqrestore(&h->reset_lock, flags);
8517 return;
8518 }
8519 spin_unlock_irqrestore(&h->reset_lock, flags);
8520
8521 sh = scsi_host_get(h->scsi_host);
8522 if (sh != NULL) {
8523 hpsa_scan_start(sh);
8524 scsi_host_put(sh);
8525 h->drv_req_rescan = 0;
8526 }
8527}
8528
8529
8530
8531
8532static void hpsa_event_monitor_worker(struct work_struct *work)
8533{
8534 struct ctlr_info *h = container_of(to_delayed_work(work),
8535 struct ctlr_info, event_monitor_work);
8536 unsigned long flags;
8537
8538 spin_lock_irqsave(&h->lock, flags);
8539 if (h->remove_in_progress) {
8540 spin_unlock_irqrestore(&h->lock, flags);
8541 return;
8542 }
8543 spin_unlock_irqrestore(&h->lock, flags);
8544
8545 if (hpsa_ctlr_needs_rescan(h)) {
8546 hpsa_ack_ctlr_events(h);
8547 hpsa_perform_rescan(h);
8548 }
8549
8550 spin_lock_irqsave(&h->lock, flags);
8551 if (!h->remove_in_progress)
8552 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8553 HPSA_EVENT_MONITOR_INTERVAL);
8554 spin_unlock_irqrestore(&h->lock, flags);
8555}
8556
8557static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8558{
8559 unsigned long flags;
8560 struct ctlr_info *h = container_of(to_delayed_work(work),
8561 struct ctlr_info, rescan_ctlr_work);
8562
8563 spin_lock_irqsave(&h->lock, flags);
8564 if (h->remove_in_progress) {
8565 spin_unlock_irqrestore(&h->lock, flags);
8566 return;
8567 }
8568 spin_unlock_irqrestore(&h->lock, flags);
8569
8570 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8571 hpsa_perform_rescan(h);
8572 } else if (h->discovery_polling) {
8573 if (hpsa_luns_changed(h)) {
8574 dev_info(&h->pdev->dev,
8575 "driver discovery polling rescan.\n");
8576 hpsa_perform_rescan(h);
8577 }
8578 }
8579 spin_lock_irqsave(&h->lock, flags);
8580 if (!h->remove_in_progress)
8581 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8582 h->heartbeat_sample_interval);
8583 spin_unlock_irqrestore(&h->lock, flags);
8584}
8585
8586static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8587{
8588 unsigned long flags;
8589 struct ctlr_info *h = container_of(to_delayed_work(work),
8590 struct ctlr_info, monitor_ctlr_work);
8591
8592 detect_controller_lockup(h);
8593 if (lockup_detected(h))
8594 return;
8595
8596 spin_lock_irqsave(&h->lock, flags);
8597 if (!h->remove_in_progress)
8598 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8599 h->heartbeat_sample_interval);
8600 spin_unlock_irqrestore(&h->lock, flags);
8601}
8602
8603static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8604 char *name)
8605{
8606 struct workqueue_struct *wq = NULL;
8607
8608 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8609 if (!wq)
8610 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8611
8612 return wq;
8613}
8614
8615static void hpda_free_ctlr_info(struct ctlr_info *h)
8616{
8617 kfree(h->reply_map);
8618 kfree(h);
8619}
8620
8621static struct ctlr_info *hpda_alloc_ctlr_info(void)
8622{
8623 struct ctlr_info *h;
8624
8625 h = kzalloc(sizeof(*h), GFP_KERNEL);
8626 if (!h)
8627 return NULL;
8628
8629 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8630 if (!h->reply_map) {
8631 kfree(h);
8632 return NULL;
8633 }
8634 return h;
8635}
8636
8637static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8638{
8639 int dac, rc;
8640 struct ctlr_info *h;
8641 int try_soft_reset = 0;
8642 unsigned long flags;
8643 u32 board_id;
8644
8645 if (number_of_controllers == 0)
8646 printk(KERN_INFO DRIVER_NAME "\n");
8647
8648 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8649 if (rc < 0) {
8650 dev_warn(&pdev->dev, "Board ID not found\n");
8651 return rc;
8652 }
8653
8654 rc = hpsa_init_reset_devices(pdev, board_id);
8655 if (rc) {
8656 if (rc != -ENOTSUPP)
8657 return rc;
8658
8659
8660
8661
8662
8663 try_soft_reset = 1;
8664 rc = 0;
8665 }
8666
8667reinit_after_soft_reset:
8668
8669
8670
8671
8672
8673 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8674 h = hpda_alloc_ctlr_info();
8675 if (!h) {
8676 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8677 return -ENOMEM;
8678 }
8679
8680 h->pdev = pdev;
8681
8682 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8683 INIT_LIST_HEAD(&h->offline_device_list);
8684 spin_lock_init(&h->lock);
8685 spin_lock_init(&h->offline_device_lock);
8686 spin_lock_init(&h->scan_lock);
8687 spin_lock_init(&h->reset_lock);
8688 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8689
8690
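/* per-CPU "lockup detected" flags, cleared at startup */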
8691 h->lockup_detected = alloc_percpu(u32);
8692 if (!h->lockup_detected) {
8693 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8694 rc = -ENOMEM;
8695 goto clean1;
8696 }
8697 set_lockup_detected_for_all_cpus(h, 0);
8698
8699 rc = hpsa_pci_init(h);
8700 if (rc)
8701 goto clean2;
8702
8703
8704
8705 rc = hpsa_scsi_host_alloc(h);
8706 if (rc)
8707 goto clean2_5;
8708
8709 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8710 h->ctlr = number_of_controllers;
8711 number_of_controllers++;
8712
8713
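/* prefer 64-bit DMA addressing; fall back to 32-bit if unsupported */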
8714 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8715 if (rc == 0) {
8716 dac = 1;
8717 } else {
8718 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8719 if (rc == 0) {
8720 dac = 0;
8721 } else {
8722 dev_err(&pdev->dev, "no suitable DMA available\n");
8723 goto clean3;
8724 }
8725 }
8726
8727
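/* keep interrupts masked until handlers and command pools are set up */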
8728 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8729
8730 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8731 if (rc)
8732 goto clean3;
8733 rc = hpsa_alloc_cmd_pool(h);
8734 if (rc)
8735 goto clean4;
8736 rc = hpsa_alloc_sg_chain_blocks(h);
8737 if (rc)
8738 goto clean5;
8739 init_waitqueue_head(&h->scan_wait_queue);
8740 init_waitqueue_head(&h->event_sync_wait_queue);
8741 mutex_init(&h->reset_mutex);
8742 h->scan_finished = 1;
8743 h->scan_waiting = 0;
8744
8745 pci_set_drvdata(pdev, h);
8746 h->ndevices = 0;
8747
8748 spin_lock_init(&h->devlock);
8749 rc = hpsa_put_ctlr_into_performant_mode(h);
8750 if (rc)
8751 goto clean6;
8752
8753
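/* per-controller ordered workqueues for rescan, resubmit and monitor work */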
8754 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8755 if (!h->rescan_ctlr_wq) {
8756 rc = -ENOMEM;
8757 goto clean7;
8758 }
8759
8760 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8761 if (!h->resubmit_wq) {
8762 rc = -ENOMEM;
8763 goto clean7;
8764 }
8765
8766 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8767 if (!h->monitor_ctlr_wq) {
8768 rc = -ENOMEM;
8769 goto clean7;
8770 }
8771
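/*
 * The controller is now initialized far enough to accept commands; if the
 * hard reset was unsupported earlier, attempt the soft reset and then
 * reinitialize from scratch.
 */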
8777 if (try_soft_reset) {
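/*
 * Mask interrupts and install handlers that simply discard completions:
 * the soft reset may post stale completions that must be thrown away
 * (see the 10 second drain below).
 */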
8786 spin_lock_irqsave(&h->lock, flags);
8787 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8788 spin_unlock_irqrestore(&h->lock, flags);
8789 hpsa_free_irqs(h);
8790 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8791 hpsa_intx_discard_completions);
8792 if (rc) {
8793 dev_warn(&h->pdev->dev,
8794 "Failed to request_irq after soft reset.\n");
8795
8796
8797
8798
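/*
 * Can't use the clean7 label here: it falls through to clean4 and would
 * free the already-freed IRQs again.  Do the clean7..clean5 work by hand
 * instead.
 */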
8799 hpsa_free_performant_mode(h);
8800 hpsa_free_sg_chain_blocks(h);
8801 hpsa_free_cmd_pool(h);
8802
8803
8804
8805
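/* IRQs were already freed above, so skip clean4 and jump to clean3 */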
8806 goto clean3;
8807 }
8808
8809 rc = hpsa_kdump_soft_reset(h);
8810 if (rc)
8811
8812 goto clean7;
8813
8814 dev_info(&h->pdev->dev, "Board READY.\n");
8815 dev_info(&h->pdev->dev,
8816 "Waiting for stale completions to drain.\n");
8817 h->access.set_intr_mask(h, HPSA_INTR_ON);
8818 msleep(10000);
8819 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8820
8821 rc = controller_reset_failed(h->cfgtable);
8822 if (rc)
8823 dev_info(&h->pdev->dev,
8824 "Soft reset appears to have failed.\n");
8825
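/* the controller has been reset; undo everything and restart init from scratch */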
8830 hpsa_undo_allocations_after_kdump_soft_reset(h);
8831 try_soft_reset = 0;
8832 if (rc)
8833
8834 return -ENODEV;
8835
8836 goto reinit_after_soft_reset;
8837 }
8838
8839
8840 h->acciopath_status = 1;
8841
8842 h->discovery_polling = 0;
8843
8844
8845
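/* turn interrupts on so the controller can post completions */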
8846 h->access.set_intr_mask(h, HPSA_INTR_ON);
8847
8848 hpsa_hba_inquiry(h);
8849
8850 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8851 if (!h->lastlogicals)
8852 dev_info(&h->pdev->dev,
8853 "Can't track change to report lun data\n");
8854
8855
8856 rc = hpsa_scsi_add_host(h);
8857 if (rc)
8858 goto clean8;
8859
8860
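/* kick off the periodic monitor, rescan and event workers */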
8861 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8862 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8863 schedule_delayed_work(&h->monitor_ctlr_work,
8864 h->heartbeat_sample_interval);
8865 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8866 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8867 h->heartbeat_sample_interval);
8868 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8869 schedule_delayed_work(&h->event_monitor_work,
8870 HPSA_EVENT_MONITOR_INTERVAL);
8871 return 0;
8872
8873clean8:
8874 kfree(h->lastlogicals);
8875clean7:
8876 hpsa_free_performant_mode(h);
8877 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8878clean6:
8879 hpsa_free_sg_chain_blocks(h);
8880clean5:
8881 hpsa_free_cmd_pool(h);
8882clean4:
8883 hpsa_free_irqs(h);
8884clean3:
8885 scsi_host_put(h->scsi_host);
8886 h->scsi_host = NULL;
8887clean2_5:
8888 hpsa_free_pci_init(h);
8889clean2:
8890 if (h->lockup_detected) {
8891 free_percpu(h->lockup_detected);
8892 h->lockup_detected = NULL;
8893 }
8894clean1:
8895 if (h->resubmit_wq) {
8896 destroy_workqueue(h->resubmit_wq);
8897 h->resubmit_wq = NULL;
8898 }
8899 if (h->rescan_ctlr_wq) {
8900 destroy_workqueue(h->rescan_ctlr_wq);
8901 h->rescan_ctlr_wq = NULL;
8902 }
8903 if (h->monitor_ctlr_wq) {
8904 destroy_workqueue(h->monitor_ctlr_wq);
8905 h->monitor_ctlr_wq = NULL;
8906 }
8907 kfree(h);
8908 return rc;
8909}
8910
8911static void hpsa_flush_cache(struct ctlr_info *h)
8912{
8913 char *flush_buf;
8914 struct CommandList *c;
8915 int rc;
8916
8917 if (unlikely(lockup_detected(h)))
8918 return;
8919 flush_buf = kzalloc(4, GFP_KERNEL);
8920 if (!flush_buf)
8921 return;
8922
8923 c = cmd_alloc(h);
8924
8925 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8926 RAID_CTLR_LUNID, TYPE_CMD)) {
8927 goto out;
8928 }
8929 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8930 DEFAULT_TIMEOUT);
8931 if (rc)
8932 goto out;
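/*
 * Note: the "out" label below sits inside the if-body, so a failed
 * submission and a bad CommandStatus both print the same warning before
 * the command and buffer are freed.
 */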
8933 if (c->err_info->CommandStatus != 0)
8934out:
8935 dev_warn(&h->pdev->dev,
8936 "error flushing cache on controller\n");
8937 cmd_free(h, c);
8938 kfree(flush_buf);
8939}
8940
8941
8942
8943
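/*
 * Ask the controller to stop caching report-lun data via the BMIC
 * sense/set diagnostic options commands, then read the options back to
 * verify the bit took effect.  Failure is logged but not fatal.
 */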
8944static void hpsa_disable_rld_caching(struct ctlr_info *h)
8945{
8946 u32 *options;
8947 struct CommandList *c;
8948 int rc;
8949
8950
8951 if (unlikely(lockup_detected(h)))
8952 return;
8953
8954 options = kzalloc(sizeof(*options), GFP_KERNEL);
8955 if (!options)
8956 return;
8957
8958 c = cmd_alloc(h);
8959
8960
8961 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8962 RAID_CTLR_LUNID, TYPE_CMD))
8963 goto errout;
8964
8965 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8966 NO_TIMEOUT);
8967 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8968 goto errout;
8969
8970
8971 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8972
8973 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8974 RAID_CTLR_LUNID, TYPE_CMD))
8975 goto errout;
8976
8977 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8978 NO_TIMEOUT);
8979 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8980 goto errout;
8981
8982
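/* read the diag options back to confirm the caching-disable bit stuck */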
8983 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8984 RAID_CTLR_LUNID, TYPE_CMD))
8985 goto errout;
8986
8987 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8988 NO_TIMEOUT);
8989 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8990 goto errout;
8991
8992 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8993 goto out;
8994
8995errout:
8996 dev_err(&h->pdev->dev,
8997 "Error: failed to disable report lun data caching.\n");
8998out:
8999 cmd_free(h, c);
9000 kfree(options);
9001}
9002
9003static void __hpsa_shutdown(struct pci_dev *pdev)
9004{
9005 struct ctlr_info *h;
9006
9007 h = pci_get_drvdata(pdev);
9008
9009
9010
9011
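/* flush the controller's write cache, then mask and free interrupts */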
9012 hpsa_flush_cache(h);
9013 h->access.set_intr_mask(h, HPSA_INTR_OFF);
9014 hpsa_free_irqs(h);
9015 hpsa_disable_interrupt_mode(h);
9016}
9017
9018static void hpsa_shutdown(struct pci_dev *pdev)
9019{
9020 __hpsa_shutdown(pdev);
9021 pci_disable_device(pdev);
9022}
9023
9024static void hpsa_free_device_info(struct ctlr_info *h)
9025{
9026 int i;
9027
9028 for (i = 0; i < h->ndevices; i++) {
9029 kfree(h->dev[i]);
9030 h->dev[i] = NULL;
9031 }
9032}
9033
9034static void hpsa_remove_one(struct pci_dev *pdev)
9035{
9036 struct ctlr_info *h;
9037 unsigned long flags;
9038
9039 if (pci_get_drvdata(pdev) == NULL) {
9040 dev_err(&pdev->dev, "unable to remove device\n");
9041 return;
9042 }
9043 h = pci_get_drvdata(pdev);
9044
9045
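/* flag removal and stop all periodic workers before tearing anything down */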
9046 spin_lock_irqsave(&h->lock, flags);
9047 h->remove_in_progress = 1;
9048 spin_unlock_irqrestore(&h->lock, flags);
9049 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9050 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9051 cancel_delayed_work_sync(&h->event_monitor_work);
9052 destroy_workqueue(h->rescan_ctlr_wq);
9053 destroy_workqueue(h->resubmit_wq);
9054 destroy_workqueue(h->monitor_ctlr_wq);
9055
9056 hpsa_delete_sas_host(h);
9057
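/*
 * Remove the SCSI host while the controller can still process I/O, so any
 * commands triggered by device removal are able to complete.
 */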
9064 if (h->scsi_host)
9065 scsi_remove_host(h->scsi_host);
9066
9067
9068 __hpsa_shutdown(pdev);
9069
9070 hpsa_free_device_info(h);
9071
9072 kfree(h->hba_inquiry_data);
9073 h->hba_inquiry_data = NULL;
9074 hpsa_free_ioaccel2_sg_chain_blocks(h);
9075 hpsa_free_performant_mode(h);
9076 hpsa_free_sg_chain_blocks(h);
9077 hpsa_free_cmd_pool(h);
9078 kfree(h->lastlogicals);
9079
9080
9081
9082 scsi_host_put(h->scsi_host);
9083 h->scsi_host = NULL;
9084
9085
9086 hpsa_free_pci_init(h);
9087
9088 free_percpu(h->lockup_detected);
9089 h->lockup_detected = NULL;
9090
9091
9092 hpda_free_ctlr_info(h);
9093}
9094
9095static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9096 __attribute__((unused)) pm_message_t state)
9097{
9098 return -ENOSYS;
9099}
9100
9101static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9102{
9103 return -ENOSYS;
9104}
9105
9106static struct pci_driver hpsa_pci_driver = {
9107 .name = HPSA,
9108 .probe = hpsa_init_one,
9109 .remove = hpsa_remove_one,
9110 .id_table = hpsa_pci_device_id,
9111 .shutdown = hpsa_shutdown,
9112 .suspend = hpsa_suspend,
9113 .resume = hpsa_resume,
9114};
9115
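/*
 * For each possible SG-entry count 0..nsgs, find the first bucket[] entry
 * big enough to hold a command of that size (entries + min_blocks) and
 * record that bucket's index in bucket_map[].
 */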
9128static void calc_bucket_map(int bucket[], int num_buckets,
9129 int nsgs, int min_blocks, u32 *bucket_map)
9130{
9131 int i, j, b, size;
9132
9133
9134 for (i = 0; i <= nsgs; i++) {
9135
9136 size = i + min_blocks;
9137 b = num_buckets;
9138
9139 for (j = 0; j < num_buckets; j++) {
9140 if (bucket[j] >= size) {
9141 b = j;
9142 break;
9143 }
9144 }
9145
9146 bucket_map[i] = b;
9147 }
9148}
9149
9150
9151
9152
9153
9154static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9155{
9156 int i;
9157 unsigned long register_value;
9158 unsigned long transMethod = CFGTBL_Trans_Performant |
9159 (trans_support & CFGTBL_Trans_use_short_tags) |
9160 CFGTBL_Trans_enable_directed_msix |
9161 (trans_support & (CFGTBL_Trans_io_accel1 |
9162 CFGTBL_Trans_io_accel2));
9163 struct access_method access = SA5_performant_access;
9164
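/*
 * bft[] and bft2[] are block fetch tables: candidate command sizes the
 * controller can be told to DMA when fetching a command (8 entries for the
 * standard path, 16 for ioaccel2).  calc_bucket_map() maps each possible
 * SG-entry count onto one of these sizes; the largest entry must cover the
 * biggest command we ever issue.
 */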
9182 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9183#define MIN_IOACCEL2_BFT_ENTRY 5
9184#define HPSA_IOACCEL2_HEADER_SZ 4
9185 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9186 13, 14, 15, 16, 17, 18, 19,
9187 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9188 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9189 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9190 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9191 16 * MIN_IOACCEL2_BFT_ENTRY);
9192 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9193 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9194
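/*
 * Select the "no_read" variant of the performant access methods when an
 * ioaccel transport is in use; plain performant access is used otherwise.
 */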
9204 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9205 access = SA5_performant_access_no_read;
9206
9207
9208 for (i = 0; i < h->nreply_queues; i++)
9209 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9210
9211 bft[7] = SG_ENTRIES_IN_CMD + 4;
9212 calc_bucket_map(bft, ARRAY_SIZE(bft),
9213 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9214 for (i = 0; i < 8; i++)
9215 writel(bft[i], &h->transtable->BlockFetch[i]);
9216
9217
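/* describe the reply queues to the controller: depth, count and DMA addresses */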
9218 writel(h->max_commands, &h->transtable->RepQSize);
9219 writel(h->nreply_queues, &h->transtable->RepQCount);
9220 writel(0, &h->transtable->RepQCtrAddrLow32);
9221 writel(0, &h->transtable->RepQCtrAddrHigh32);
9222
9223 for (i = 0; i < h->nreply_queues; i++) {
9224 writel(0, &h->transtable->RepQAddr[i].upper);
9225 writel(h->reply_queue[i].busaddr,
9226 &h->transtable->RepQAddr[i].lower);
9227 }
9228
9229 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9230 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9231
9232
9233
9234 if (trans_support & CFGTBL_Trans_io_accel1) {
9235 access = SA5_ioaccel_mode1_access;
9236 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9237 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9238 } else if (trans_support & CFGTBL_Trans_io_accel2)
9240 access = SA5_ioaccel_mode2_access;
9241 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9242 if (hpsa_wait_for_mode_change_ack(h)) {
9243 dev_err(&h->pdev->dev,
9244 "performant mode problem - doorbell timeout\n");
9245 return -ENODEV;
9246 }
9247 register_value = readl(&(h->cfgtable->TransportActive));
9248 if (!(register_value & CFGTBL_Trans_Performant)) {
9249 dev_err(&h->pdev->dev,
9250 "performant mode problem - transport not active\n");
9251 return -ENODEV;
9252 }
9253
9254 h->access = access;
9255 h->transMethod = transMethod;
9256
9257 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9258 (trans_support & CFGTBL_Trans_io_accel2)))
9259 return 0;
9260
9261 if (trans_support & CFGTBL_Trans_io_accel1) {
9262
9263 for (i = 0; i < h->nreply_queues; i++) {
9264 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9265 h->reply_queue[i].current_entry =
9266 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9267 }
9268 bft[7] = h->ioaccel_maxsg + 8;
9269 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9270 h->ioaccel1_blockFetchTable);
9271
9272
9273 for (i = 0; i < h->nreply_queues; i++)
9274 memset(h->reply_queue[i].head,
9275 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9276 h->reply_queue_size);
9277
9278
9279
9280
9281 for (i = 0; i < h->nr_cmds; i++) {
9282 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9283
9284 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9285 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9286 (i * sizeof(struct ErrorInfo)));
9287 cp->err_info_len = sizeof(struct ErrorInfo);
9288 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9289 cp->host_context_flags =
9290 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9291 cp->timeout_sec = 0;
9292 cp->ReplyQueue = 0;
9293 cp->tag =
9294 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9295 cp->host_addr =
9296 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9297 (i * sizeof(struct io_accel1_cmd)));
9298 }
9299 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9300 u64 cfg_offset, cfg_base_addr_index;
9301 u32 bft2_offset, cfg_base_addr;
9302 int rc;
9303
9304 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9305 &cfg_base_addr_index, &cfg_offset);
9306 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9307 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9308 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9309 4, h->ioaccel2_blockFetchTable);
9310 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9311 BUILD_BUG_ON(offsetof(struct CfgTable,
9312 io_accel_request_size_offset) != 0xb8);
9313 h->ioaccel2_bft2_regs =
9314 remap_pci_mem(pci_resource_start(h->pdev,
9315 cfg_base_addr_index) +
9316 cfg_offset + bft2_offset,
9317 ARRAY_SIZE(bft2) *
9318 sizeof(*h->ioaccel2_bft2_regs));
9319 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9320 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9321 }
9322 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9323 if (hpsa_wait_for_mode_change_ack(h)) {
9324 dev_err(&h->pdev->dev,
9325 "performant mode problem - enabling ioaccel mode\n");
9326 return -ENODEV;
9327 }
9328 return 0;
9329}
9330
9331
9332static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9333{
9334 if (h->ioaccel_cmd_pool) {
9335 dma_free_coherent(&h->pdev->dev,
9336 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9337 h->ioaccel_cmd_pool,
9338 h->ioaccel_cmd_pool_dhandle);
9339 h->ioaccel_cmd_pool = NULL;
9340 h->ioaccel_cmd_pool_dhandle = 0;
9341 }
9342 kfree(h->ioaccel1_blockFetchTable);
9343 h->ioaccel1_blockFetchTable = NULL;
9344}
9345
9346
9347static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9348{
9349 h->ioaccel_maxsg =
9350 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9351 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9352 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9353
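/*
 * io_accel1_cmd size must be a multiple of IOACCEL1_COMMANDLIST_ALIGNMENT
 * so every command in the DMA pool stays aligned.
 */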
9358 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9359 IOACCEL1_COMMANDLIST_ALIGNMENT);
9360 h->ioaccel_cmd_pool =
9361 dma_alloc_coherent(&h->pdev->dev,
9362 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9363 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9364
9365 h->ioaccel1_blockFetchTable =
9366 kmalloc(((h->ioaccel_maxsg + 1) *
9367 sizeof(u32)), GFP_KERNEL);
9368
9369 if ((h->ioaccel_cmd_pool == NULL) ||
9370 (h->ioaccel1_blockFetchTable == NULL))
9371 goto clean_up;
9372
9373 memset(h->ioaccel_cmd_pool, 0,
9374 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9375 return 0;
9376
9377clean_up:
9378 hpsa_free_ioaccel1_cmd_and_bft(h);
9379 return -ENOMEM;
9380}
9381
9382
9383static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9384{
9385 hpsa_free_ioaccel2_sg_chain_blocks(h);
9386
9387 if (h->ioaccel2_cmd_pool) {
9388 dma_free_coherent(&h->pdev->dev,
9389 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9390 h->ioaccel2_cmd_pool,
9391 h->ioaccel2_cmd_pool_dhandle);
9392 h->ioaccel2_cmd_pool = NULL;
9393 h->ioaccel2_cmd_pool_dhandle = 0;
9394 }
9395 kfree(h->ioaccel2_blockFetchTable);
9396 h->ioaccel2_blockFetchTable = NULL;
9397}
9398
9399
9400static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9401{
9402 int rc;
9403
9404
9405
9406 h->ioaccel_maxsg =
9407 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9408 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9409 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9410
9411 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9412 IOACCEL2_COMMANDLIST_ALIGNMENT);
9413 h->ioaccel2_cmd_pool =
9414 dma_alloc_coherent(&h->pdev->dev,
9415 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9416 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9417
9418 h->ioaccel2_blockFetchTable =
9419 kmalloc(((h->ioaccel_maxsg + 1) *
9420 sizeof(u32)), GFP_KERNEL);
9421
9422 if ((h->ioaccel2_cmd_pool == NULL) ||
9423 (h->ioaccel2_blockFetchTable == NULL)) {
9424 rc = -ENOMEM;
9425 goto clean_up;
9426 }
9427
9428 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9429 if (rc)
9430 goto clean_up;
9431
9432 memset(h->ioaccel2_cmd_pool, 0,
9433 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9434 return 0;
9435
9436clean_up:
9437 hpsa_free_ioaccel2_cmd_and_bft(h);
9438 return rc;
9439}
9440
9441
9442static void hpsa_free_performant_mode(struct ctlr_info *h)
9443{
9444 kfree(h->blockFetchTable);
9445 h->blockFetchTable = NULL;
9446 hpsa_free_reply_queues(h);
9447 hpsa_free_ioaccel1_cmd_and_bft(h);
9448 hpsa_free_ioaccel2_cmd_and_bft(h);
9449}
9450
9451
9452
9453
9454static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9455{
9456 u32 trans_support;
9457 unsigned long transMethod = CFGTBL_Trans_Performant |
9458 CFGTBL_Trans_use_short_tags;
9459 int i, rc;
9460
9461 if (hpsa_simple_mode)
9462 return 0;
9463
9464 trans_support = readl(&(h->cfgtable->TransportSupport));
9465 if (!(trans_support & PERFORMANT_MODE))
9466 return 0;
9467
9468
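/* if the controller offers an ioaccel transport, prefer ioaccel1 over ioaccel2 */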
9469 if (trans_support & CFGTBL_Trans_io_accel1) {
9470 transMethod |= CFGTBL_Trans_io_accel1 |
9471 CFGTBL_Trans_enable_directed_msix;
9472 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9473 if (rc)
9474 return rc;
9475 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9476 transMethod |= CFGTBL_Trans_io_accel2 |
9477 CFGTBL_Trans_enable_directed_msix;
9478 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9479 if (rc)
9480 return rc;
9481 }
9482
9483 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9484 hpsa_get_max_perf_mode_cmds(h);
9485
9486 h->reply_queue_size = h->max_commands * sizeof(u64);
9487
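/* allocate a DMA-coherent buffer for each reply queue (one per MSI-X vector, or just one) */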
9488 for (i = 0; i < h->nreply_queues; i++) {
9489 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9490 h->reply_queue_size,
9491 &h->reply_queue[i].busaddr,
9492 GFP_KERNEL);
9493 if (!h->reply_queue[i].head) {
9494 rc = -ENOMEM;
9495 goto clean1;
9496 }
9497 h->reply_queue[i].size = h->max_commands;
9498 h->reply_queue[i].wraparound = 1;
9499 h->reply_queue[i].current_entry = 0;
9500 }
9501
9502
9503 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9504 sizeof(u32)), GFP_KERNEL);
9505 if (!h->blockFetchTable) {
9506 rc = -ENOMEM;
9507 goto clean1;
9508 }
9509
9510 rc = hpsa_enter_performant_mode(h, trans_support);
9511 if (rc)
9512 goto clean2;
9513 return 0;
9514
9515clean2:
9516 kfree(h->blockFetchTable);
9517 h->blockFetchTable = NULL;
9518clean1:
9519 hpsa_free_reply_queues(h);
9520 hpsa_free_ioaccel1_cmd_and_bft(h);
9521 hpsa_free_ioaccel2_cmd_and_bft(h);
9522 return rc;
9523}
9524
9525static int is_accelerated_cmd(struct CommandList *c)
9526{
9527 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9528}
9529
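/*
 * Poll the command pool until no accelerated-path (ioaccel) commands remain
 * outstanding; the refcount taken in the loop is dropped again by cmd_free().
 */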
9530static void hpsa_drain_accel_commands(struct ctlr_info *h)
9531{
9532 struct CommandList *c = NULL;
9533 int i, accel_cmds_out;
9534 int refcount;
9535
9536 do {
9537 accel_cmds_out = 0;
9538 for (i = 0; i < h->nr_cmds; i++) {
9539 c = h->cmd_pool + i;
9540 refcount = atomic_inc_return(&c->refcount);
9541 if (refcount > 1)
9542 accel_cmds_out += is_accelerated_cmd(c);
9543 cmd_free(h, c);
9544 }
9545 if (accel_cmds_out <= 0)
9546 break;
9547 msleep(100);
9548 } while (1);
9549}
9550
9551static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9552 struct hpsa_sas_port *hpsa_sas_port)
9553{
9554 struct hpsa_sas_phy *hpsa_sas_phy;
9555 struct sas_phy *phy;
9556
9557 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9558 if (!hpsa_sas_phy)
9559 return NULL;
9560
9561 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9562 hpsa_sas_port->next_phy_index);
9563 if (!phy) {
9564 kfree(hpsa_sas_phy);
9565 return NULL;
9566 }
9567
9568 hpsa_sas_port->next_phy_index++;
9569 hpsa_sas_phy->phy = phy;
9570 hpsa_sas_phy->parent_port = hpsa_sas_port;
9571
9572 return hpsa_sas_phy;
9573}
9574
9575static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9576{
9577 struct sas_phy *phy = hpsa_sas_phy->phy;
9578
9579 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9580 if (hpsa_sas_phy->added_to_port)
9581 list_del(&hpsa_sas_phy->phy_list_entry);
9582 sas_phy_delete(phy);
9583 kfree(hpsa_sas_phy);
9584}
9585
9586static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9587{
9588 int rc;
9589 struct hpsa_sas_port *hpsa_sas_port;
9590 struct sas_phy *phy;
9591 struct sas_identify *identify;
9592
9593 hpsa_sas_port = hpsa_sas_phy->parent_port;
9594 phy = hpsa_sas_phy->phy;
9595
9596 identify = &phy->identify;
9597 memset(identify, 0, sizeof(*identify));
9598 identify->sas_address = hpsa_sas_port->sas_address;
9599 identify->device_type = SAS_END_DEVICE;
9600 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9601 identify->target_port_protocols = SAS_PROTOCOL_STP;
9602 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9603 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9604 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9605 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9606 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9607
9608 rc = sas_phy_add(hpsa_sas_phy->phy);
9609 if (rc)
9610 return rc;
9611
9612 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9613 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9614 &hpsa_sas_port->phy_list_head);
9615 hpsa_sas_phy->added_to_port = true;
9616
9617 return 0;
9618}
9619
9620static int
9621 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9622 struct sas_rphy *rphy)
9623{
9624 struct sas_identify *identify;
9625
9626 identify = &rphy->identify;
9627 identify->sas_address = hpsa_sas_port->sas_address;
9628 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9629 identify->target_port_protocols = SAS_PROTOCOL_STP;
9630
9631 return sas_rphy_add(rphy);
9632}
9633
9634static struct hpsa_sas_port
9635 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9636 u64 sas_address)
9637{
9638 int rc;
9639 struct hpsa_sas_port *hpsa_sas_port;
9640 struct sas_port *port;
9641
9642 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9643 if (!hpsa_sas_port)
9644 return NULL;
9645
9646 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9647 hpsa_sas_port->parent_node = hpsa_sas_node;
9648
9649 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9650 if (!port)
9651 goto free_hpsa_port;
9652
9653 rc = sas_port_add(port);
9654 if (rc)
9655 goto free_sas_port;
9656
9657 hpsa_sas_port->port = port;
9658 hpsa_sas_port->sas_address = sas_address;
9659 list_add_tail(&hpsa_sas_port->port_list_entry,
9660 &hpsa_sas_node->port_list_head);
9661
9662 return hpsa_sas_port;
9663
9664free_sas_port:
9665 sas_port_free(port);
9666free_hpsa_port:
9667 kfree(hpsa_sas_port);
9668
9669 return NULL;
9670}
9671
9672static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9673{
9674 struct hpsa_sas_phy *hpsa_sas_phy;
9675 struct hpsa_sas_phy *next;
9676
9677 list_for_each_entry_safe(hpsa_sas_phy, next,
9678 &hpsa_sas_port->phy_list_head, phy_list_entry)
9679 hpsa_free_sas_phy(hpsa_sas_phy);
9680
9681 sas_port_delete(hpsa_sas_port->port);
9682 list_del(&hpsa_sas_port->port_list_entry);
9683 kfree(hpsa_sas_port);
9684}
9685
9686static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9687{
9688 struct hpsa_sas_node *hpsa_sas_node;
9689
9690 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9691 if (hpsa_sas_node) {
9692 hpsa_sas_node->parent_dev = parent_dev;
9693 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9694 }
9695
9696 return hpsa_sas_node;
9697}
9698
9699static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9700{
9701 struct hpsa_sas_port *hpsa_sas_port;
9702 struct hpsa_sas_port *next;
9703
9704 if (!hpsa_sas_node)
9705 return;
9706
9707 list_for_each_entry_safe(hpsa_sas_port, next,
9708 &hpsa_sas_node->port_list_head, port_list_entry)
9709 hpsa_free_sas_port(hpsa_sas_port);
9710
9711 kfree(hpsa_sas_node);
9712}
9713
9714static struct hpsa_scsi_dev_t
9715 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9716 struct sas_rphy *rphy)
9717{
9718 int i;
9719 struct hpsa_scsi_dev_t *device;
9720
9721 for (i = 0; i < h->ndevices; i++) {
9722 device = h->dev[i];
9723 if (!device->sas_port)
9724 continue;
9725 if (device->sas_port->rphy == rphy)
9726 return device;
9727 }
9728
9729 return NULL;
9730}
9731
9732static int hpsa_add_sas_host(struct ctlr_info *h)
9733{
9734 int rc;
9735 struct device *parent_dev;
9736 struct hpsa_sas_node *hpsa_sas_node;
9737 struct hpsa_sas_port *hpsa_sas_port;
9738 struct hpsa_sas_phy *hpsa_sas_phy;
9739
9740 parent_dev = &h->scsi_host->shost_dev;
9741
9742 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9743 if (!hpsa_sas_node)
9744 return -ENOMEM;
9745
9746 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9747 if (!hpsa_sas_port) {
9748 rc = -ENODEV;
9749 goto free_sas_node;
9750 }
9751
9752 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9753 if (!hpsa_sas_phy) {
9754 rc = -ENODEV;
9755 goto free_sas_port;
9756 }
9757
9758 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9759 if (rc)
9760 goto free_sas_phy;
9761
9762 h->sas_host = hpsa_sas_node;
9763
9764 return 0;
9765
9766free_sas_phy:
9767 hpsa_free_sas_phy(hpsa_sas_phy);
9768free_sas_port:
9769 hpsa_free_sas_port(hpsa_sas_port);
9770free_sas_node:
9771 hpsa_free_sas_node(hpsa_sas_node);
9772
9773 return rc;
9774}
9775
9776static void hpsa_delete_sas_host(struct ctlr_info *h)
9777{
9778 hpsa_free_sas_node(h->sas_host);
9779}
9780
9781static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9782 struct hpsa_scsi_dev_t *device)
9783{
9784 int rc;
9785 struct hpsa_sas_port *hpsa_sas_port;
9786 struct sas_rphy *rphy;
9787
9788 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9789 if (!hpsa_sas_port)
9790 return -ENOMEM;
9791
9792 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9793 if (!rphy) {
9794 rc = -ENODEV;
9795 goto free_sas_port;
9796 }
9797
9798 hpsa_sas_port->rphy = rphy;
9799 device->sas_port = hpsa_sas_port;
9800
9801 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9802 if (rc)
9803 goto free_sas_port;
9804
9805 return 0;
9806
9807free_sas_port:
9808 hpsa_free_sas_port(hpsa_sas_port);
9809 device->sas_port = NULL;
9810
9811 return rc;
9812}
9813
9814static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9815{
9816 if (device->sas_port) {
9817 hpsa_free_sas_port(device->sas_port);
9818 device->sas_port = NULL;
9819 }
9820}
9821
9822static int
9823hpsa_sas_get_linkerrors(struct sas_phy *phy)
9824{
9825 return 0;
9826}
9827
9828static int
9829hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9830{
9831 struct Scsi_Host *shost = phy_to_shost(rphy);
9832 struct ctlr_info *h;
9833 struct hpsa_scsi_dev_t *sd;
9834
9835 if (!shost)
9836 return -ENXIO;
9837
9838 h = shost_to_hba(shost);
9839
9840 if (!h)
9841 return -ENXIO;
9842
9843 sd = hpsa_find_device_by_sas_rphy(h, rphy);
9844 if (!sd)
9845 return -ENXIO;
9846
9847 *identifier = sd->eli;
9848
9849 return 0;
9850}
9851
9852static int
9853hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9854{
9855 return -ENXIO;
9856}
9857
9858static int
9859hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9860{
9861 return 0;
9862}
9863
9864static int
9865hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9866{
9867 return 0;
9868}
9869
9870static int
9871hpsa_sas_phy_setup(struct sas_phy *phy)
9872{
9873 return 0;
9874}
9875
9876static void
9877hpsa_sas_phy_release(struct sas_phy *phy)
9878{
9879}
9880
9881static int
9882hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9883{
9884 return -EINVAL;
9885}
9886
9887static struct sas_function_template hpsa_sas_transport_functions = {
9888 .get_linkerrors = hpsa_sas_get_linkerrors,
9889 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9890 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9891 .phy_reset = hpsa_sas_phy_reset,
9892 .phy_enable = hpsa_sas_phy_enable,
9893 .phy_setup = hpsa_sas_phy_setup,
9894 .phy_release = hpsa_sas_phy_release,
9895 .set_phy_speed = hpsa_sas_phy_speed,
9896};
9897
9898
9899
9900
9901
9902static int __init hpsa_init(void)
9903{
9904 int rc;
9905
9906 hpsa_sas_transport_template =
9907 sas_attach_transport(&hpsa_sas_transport_functions);
9908 if (!hpsa_sas_transport_template)
9909 return -ENODEV;
9910
9911 rc = pci_register_driver(&hpsa_pci_driver);
9912
9913 if (rc)
9914 sas_release_transport(hpsa_sas_transport_template);
9915
9916 return rc;
9917}
9918
9919static void __exit hpsa_cleanup(void)
9920{
9921 pci_unregister_driver(&hpsa_pci_driver);
9922 sas_release_transport(hpsa_sas_transport_template);
9923}
9924
9925static void __attribute__((unused)) verify_offsets(void)
9926{
9927#define VERIFY_OFFSET(member, offset) \
9928 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9929
9930 VERIFY_OFFSET(structure_size, 0);
9931 VERIFY_OFFSET(volume_blk_size, 4);
9932 VERIFY_OFFSET(volume_blk_cnt, 8);
9933 VERIFY_OFFSET(phys_blk_shift, 16);
9934 VERIFY_OFFSET(parity_rotation_shift, 17);
9935 VERIFY_OFFSET(strip_size, 18);
9936 VERIFY_OFFSET(disk_starting_blk, 20);
9937 VERIFY_OFFSET(disk_blk_cnt, 28);
9938 VERIFY_OFFSET(data_disks_per_row, 36);
9939 VERIFY_OFFSET(metadata_disks_per_row, 38);
9940 VERIFY_OFFSET(row_cnt, 40);
9941 VERIFY_OFFSET(layout_map_count, 42);
9942 VERIFY_OFFSET(flags, 44);
9943 VERIFY_OFFSET(dekindex, 46);
9944
9945 VERIFY_OFFSET(data, 64);
9946
9947#undef VERIFY_OFFSET
9948
9949#define VERIFY_OFFSET(member, offset) \
9950 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9951
9952 VERIFY_OFFSET(IU_type, 0);
9953 VERIFY_OFFSET(direction, 1);
9954 VERIFY_OFFSET(reply_queue, 2);
9955
9956 VERIFY_OFFSET(scsi_nexus, 4);
9957 VERIFY_OFFSET(Tag, 8);
9958 VERIFY_OFFSET(cdb, 16);
9959 VERIFY_OFFSET(cciss_lun, 32);
9960 VERIFY_OFFSET(data_len, 40);
9961 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9962 VERIFY_OFFSET(sg_count, 45);
9963
9964 VERIFY_OFFSET(err_ptr, 48);
9965 VERIFY_OFFSET(err_len, 56);
9966
9967 VERIFY_OFFSET(sg, 64);
9968
9969#undef VERIFY_OFFSET
9970
9971#define VERIFY_OFFSET(member, offset) \
9972 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9973
9974 VERIFY_OFFSET(dev_handle, 0x00);
9975 VERIFY_OFFSET(reserved1, 0x02);
9976 VERIFY_OFFSET(function, 0x03);
9977 VERIFY_OFFSET(reserved2, 0x04);
9978 VERIFY_OFFSET(err_info, 0x0C);
9979 VERIFY_OFFSET(reserved3, 0x10);
9980 VERIFY_OFFSET(err_info_len, 0x12);
9981 VERIFY_OFFSET(reserved4, 0x13);
9982 VERIFY_OFFSET(sgl_offset, 0x14);
9983 VERIFY_OFFSET(reserved5, 0x15);
9984 VERIFY_OFFSET(transfer_len, 0x1C);
9985 VERIFY_OFFSET(reserved6, 0x20);
9986 VERIFY_OFFSET(io_flags, 0x24);
9987 VERIFY_OFFSET(reserved7, 0x26);
9988 VERIFY_OFFSET(LUN, 0x34);
9989 VERIFY_OFFSET(control, 0x3C);
9990 VERIFY_OFFSET(CDB, 0x40);
9991 VERIFY_OFFSET(reserved8, 0x50);
9992 VERIFY_OFFSET(host_context_flags, 0x60);
9993 VERIFY_OFFSET(timeout_sec, 0x62);
9994 VERIFY_OFFSET(ReplyQueue, 0x64);
9995 VERIFY_OFFSET(reserved9, 0x65);
9996 VERIFY_OFFSET(tag, 0x68);
9997 VERIFY_OFFSET(host_addr, 0x70);
9998 VERIFY_OFFSET(CISS_LUN, 0x78);
9999 VERIFY_OFFSET(SG, 0x78 + 8);
10000#undef VERIFY_OFFSET
10001}
10002
10003module_init(hpsa_init);
10004module_exit(hpsa_cleanup);
10005