/*
 *    Disk Array driver for HP Smart Array SAS controllers.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

#define HPSA_DRIVER_VERSION "3.4.16-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board operations to complete */
#define CLEAR_EVENT_WAIT_INTERVAL 20
#define MODE_CHANGE_WAIT_INTERVAL 10
#define MAX_CLEAR_EVENT_WAIT 30000
#define MAX_MODE_CHANGE_WAIT 2000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* How many times to retry a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*
 *  board_id = Subsystem Device ID & Vendor ID
 *  product  = Marketing Name for the board
 *  access   = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

/*
 * Sentinel values stored in c->scsi_cmd to mark a command slot as busy
 * or idle without pointing at a real scsi_cmnd.
 */
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

229static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
230static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
231static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
232
233#ifdef CONFIG_COMPAT
234static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
235 void __user *arg);
236#endif
237
238static void cmd_free(struct ctlr_info *h, struct CommandList *c);
239static struct CommandList *cmd_alloc(struct ctlr_info *h);
240static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
241static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
242 struct scsi_cmnd *scmd);
243static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
244 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
245 int cmd_type);
246static void hpsa_free_cmd_pool(struct ctlr_info *h);
247#define VPD_PAGE (1 << 8)
248#define HPSA_SIMPLE_ERROR_BITS 0x03
249
250static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
251static void hpsa_scan_start(struct Scsi_Host *);
252static int hpsa_scan_finished(struct Scsi_Host *sh,
253 unsigned long elapsed_time);
254static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
255
256static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
257static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
258static int hpsa_slave_alloc(struct scsi_device *sdev);
259static int hpsa_slave_configure(struct scsi_device *sdev);
260static void hpsa_slave_destroy(struct scsi_device *sdev);
261
262static void hpsa_update_scsi_devices(struct ctlr_info *h);
263static int check_for_unit_attention(struct ctlr_info *h,
264 struct CommandList *c);
265static void check_ioctl_unit_attention(struct ctlr_info *h,
266 struct CommandList *c);
267
268static void calc_bucket_map(int *bucket, int num_buckets,
269 int nsgs, int min_blocks, u32 *bucket_map);
270static void hpsa_free_performant_mode(struct ctlr_info *h);
271static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
272static inline u32 next_command(struct ctlr_info *h, u8 q);
273static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
274 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
275 u64 *cfg_offset);
276static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
277 unsigned long *memory_bar);
278static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
279static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
280 int wait_for_ready);
281static inline void finish_cmd(struct CommandList *c);
282static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
283#define BOARD_NOT_READY 0
284#define BOARD_READY 1
285static void hpsa_drain_accel_commands(struct ctlr_info *h);
286static void hpsa_flush_cache(struct ctlr_info *h);
287static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
288 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
289 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
290static void hpsa_command_resubmit_worker(struct work_struct *work);
291static u32 lockup_detected(struct ctlr_info *h);
292static int detect_controller_lockup(struct ctlr_info *h);
293static void hpsa_disable_rld_caching(struct ctlr_info *h);
294static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
295 struct ReportExtendedLUNdata *buf, int bufsize);
296static int hpsa_luns_changed(struct ctlr_info *h);
297static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
298 struct hpsa_scsi_dev_t *dev,
299 unsigned char *scsi3addr);
300
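/*
 * The Scsi_Host private data (shost_priv) holds a single pointer to the
 * driver's per-controller ctlr_info; these helpers recover it from a
 * scsi_device or a Scsi_Host.
 */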
301static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
302{
303 unsigned long *priv = shost_priv(sdev->host);
304 return (struct ctlr_info *) *priv;
305}
306
307static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
308{
309 unsigned long *priv = shost_priv(sh);
310 return (struct ctlr_info *) *priv;
311}
312
313static inline bool hpsa_is_cmd_idle(struct CommandList *c)
314{
315 return c->scsi_cmd == SCSI_CMD_IDLE;
316}
317
318static inline bool hpsa_is_pending_event(struct CommandList *c)
319{
320 return c->abort_pending || c->reset_pending;
321}
322
323
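/*
 * Extract sense key, ASC and ASCQ from raw sense data.  If the sense data
 * cannot be normalized, each output field is left as 0xff (-1 truncated
 * to u8), which callers treat as "no valid sense".
 */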
324static void decode_sense_data(const u8 *sense_data, int sense_data_len,
325 u8 *sense_key, u8 *asc, u8 *ascq)
326{
327 struct scsi_sense_hdr sshdr;
328 bool rc;
329
330 *sense_key = -1;
331 *asc = -1;
332 *ascq = -1;
333
334 if (sense_data_len < 1)
335 return;
336
337 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
338 if (rc) {
339 *sense_key = sshdr.sense_key;
340 *asc = sshdr.asc;
341 *ascq = sshdr.ascq;
342 }
343}
344
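/*
 * Return 1 if the completed command's sense data reports a unit attention
 * condition (logging the specific ASC), 0 otherwise.
 */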
345static int check_for_unit_attention(struct ctlr_info *h,
346 struct CommandList *c)
347{
348 u8 sense_key, asc, ascq;
349 int sense_len;
350
351 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
352 sense_len = sizeof(c->err_info->SenseInfo);
353 else
354 sense_len = c->err_info->SenseLen;
355
356 decode_sense_data(c->err_info->SenseInfo, sense_len,
357 &sense_key, &asc, &ascq);
358 if (sense_key != UNIT_ATTENTION || asc == 0xff)
359 return 0;
360
361 switch (asc) {
362 case STATE_CHANGED:
363 dev_warn(&h->pdev->dev,
364 "%s: a state change detected, command retried\n",
365 h->devname);
366 break;
367 case LUN_FAILED:
368 dev_warn(&h->pdev->dev,
369 "%s: LUN failure detected\n", h->devname);
370 break;
371 case REPORT_LUNS_CHANGED:
372 dev_warn(&h->pdev->dev,
373 "%s: report LUN data changed\n", h->devname);
378 break;
379 case POWER_OR_RESET:
380 dev_warn(&h->pdev->dev,
381 "%s: a power on or device reset detected\n",
382 h->devname);
383 break;
384 case UNIT_ATTENTION_CLEARED:
385 dev_warn(&h->pdev->dev,
386 "%s: unit attention cleared by another initiator\n",
387 h->devname);
388 break;
389 default:
390 dev_warn(&h->pdev->dev,
391 "%s: unknown unit attention detected\n",
392 h->devname);
393 break;
394 }
395 return 1;
396}
397
398static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
399{
400 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
401 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
402 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
403 return 0;
404 dev_warn(&h->pdev->dev, HPSA "device busy");
405 return 1;
406}
407
408static u32 lockup_detected(struct ctlr_info *h);
409static ssize_t host_show_lockup_detected(struct device *dev,
410 struct device_attribute *attr, char *buf)
411{
412 int ld;
413 struct ctlr_info *h;
414 struct Scsi_Host *shost = class_to_shost(dev);
415
416 h = shost_to_hba(shost);
417 ld = lockup_detected(h);
418
419 return sprintf(buf, "ld=%d\n", ld);
420}
421
422static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
423 struct device_attribute *attr,
424 const char *buf, size_t count)
425{
426 int status, len;
427 struct ctlr_info *h;
428 struct Scsi_Host *shost = class_to_shost(dev);
429 char tmpbuf[10];
430
431 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
432 return -EACCES;
433 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
434 strncpy(tmpbuf, buf, len);
435 tmpbuf[len] = '\0';
436 if (sscanf(tmpbuf, "%d", &status) != 1)
437 return -EINVAL;
438 h = shost_to_hba(shost);
439 h->acciopath_status = !!status;
440 dev_warn(&h->pdev->dev,
441 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
442 h->acciopath_status ? "enabled" : "disabled");
443 return count;
444}
445
446static ssize_t host_store_raid_offload_debug(struct device *dev,
447 struct device_attribute *attr,
448 const char *buf, size_t count)
449{
450 int debug_level, len;
451 struct ctlr_info *h;
452 struct Scsi_Host *shost = class_to_shost(dev);
453 char tmpbuf[10];
454
455 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
456 return -EACCES;
457 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
458 strncpy(tmpbuf, buf, len);
459 tmpbuf[len] = '\0';
460 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
461 return -EINVAL;
462 if (debug_level < 0)
463 debug_level = 0;
464 h = shost_to_hba(shost);
465 h->raid_offload_debug = debug_level;
466 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
467 h->raid_offload_debug);
468 return count;
469}
470
471static ssize_t host_store_rescan(struct device *dev,
472 struct device_attribute *attr,
473 const char *buf, size_t count)
474{
475 struct ctlr_info *h;
476 struct Scsi_Host *shost = class_to_shost(dev);
477 h = shost_to_hba(shost);
478 hpsa_scan_start(h->scsi_host);
479 return count;
480}
481
482static ssize_t host_show_firmware_revision(struct device *dev,
483 struct device_attribute *attr, char *buf)
484{
485 struct ctlr_info *h;
486 struct Scsi_Host *shost = class_to_shost(dev);
487 unsigned char *fwrev;
488
489 h = shost_to_hba(shost);
490 if (!h->hba_inquiry_data)
491 return 0;
492 fwrev = &h->hba_inquiry_data[32];
493 return snprintf(buf, 20, "%c%c%c%c\n",
494 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
495}
496
497static ssize_t host_show_commands_outstanding(struct device *dev,
498 struct device_attribute *attr, char *buf)
499{
500 struct Scsi_Host *shost = class_to_shost(dev);
501 struct ctlr_info *h = shost_to_hba(shost);
502
503 return snprintf(buf, 20, "%d\n",
504 atomic_read(&h->commands_outstanding));
505}
506
507static ssize_t host_show_transport_mode(struct device *dev,
508 struct device_attribute *attr, char *buf)
509{
510 struct ctlr_info *h;
511 struct Scsi_Host *shost = class_to_shost(dev);
512
513 h = shost_to_hba(shost);
514 return snprintf(buf, 20, "%s\n",
515 h->transMethod & CFGTBL_Trans_Performant ?
516 "performant" : "simple");
517}
518
519static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
520 struct device_attribute *attr, char *buf)
521{
522 struct ctlr_info *h;
523 struct Scsi_Host *shost = class_to_shost(dev);
524
525 h = shost_to_hba(shost);
526 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
527 (h->acciopath_status == 1) ? "enabled" : "disabled");
528}
529
530
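/*
 * Board IDs of controllers which cannot be hard reset; consulted by
 * ctlr_is_hard_resettable() below.
 */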
531static u32 unresettable_controller[] = {
532 0x324a103C,
533 0x324b103C,
534 0x3223103C,
535 0x3234103C,
536 0x3235103C,
537 0x3211103C,
538 0x3212103C,
539 0x3213103C,
540 0x3214103C,
541 0x3215103C,
542 0x3237103C,
543 0x323D103C,
544 0x40800E11,
545 0x409C0E11,
546 0x409D0E11,
547 0x40700E11,
548 0x40820E11,
549 0x40830E11,
550 0x409A0E11,
551 0x409B0E11,
552 0x40910E11,
553};
554
555
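/* Board IDs of controllers which do not support the driver's soft reset method */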
556static u32 soft_unresettable_controller[] = {
557 0x40800E11,
558 0x40700E11,
559 0x40820E11,
560 0x40830E11,
561 0x409A0E11,
562 0x409B0E11,
563 0x40910E11,
	/*
	 * Exclude 640x boards: these are two PCI devices in one slot that
	 * share a battery-backed cache module, so soft resetting one of
	 * the pair is unsafe.
	 */
571 0x409C0E11,
572 0x409D0E11,
573};
574
575static u32 needs_abort_tags_swizzled[] = {
576 0x323D103C,
577 0x324a103C,
578 0x324b103C,
579};
580
581static int board_id_in_array(u32 a[], int nelems, u32 board_id)
582{
583 int i;
584
585 for (i = 0; i < nelems; i++)
586 if (a[i] == board_id)
587 return 1;
588 return 0;
589}
590
591static int ctlr_is_hard_resettable(u32 board_id)
592{
593 return !board_id_in_array(unresettable_controller,
594 ARRAY_SIZE(unresettable_controller), board_id);
595}
596
597static int ctlr_is_soft_resettable(u32 board_id)
598{
599 return !board_id_in_array(soft_unresettable_controller,
600 ARRAY_SIZE(soft_unresettable_controller), board_id);
601}
602
603static int ctlr_is_resettable(u32 board_id)
604{
605 return ctlr_is_hard_resettable(board_id) ||
606 ctlr_is_soft_resettable(board_id);
607}
608
609static int ctlr_needs_abort_tags_swizzled(u32 board_id)
610{
611 return board_id_in_array(needs_abort_tags_swizzled,
612 ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
613}
614
615static ssize_t host_show_resettable(struct device *dev,
616 struct device_attribute *attr, char *buf)
617{
618 struct ctlr_info *h;
619 struct Scsi_Host *shost = class_to_shost(dev);
620
621 h = shost_to_hba(shost);
622 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
623}
624
625static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
626{
627 return (scsi3addr[3] & 0xC0) == 0x40;
628}
629
630static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
631 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
632};
633#define HPSA_RAID_0 0
634#define HPSA_RAID_4 1
635#define HPSA_RAID_1 2
636#define HPSA_RAID_5 3
637#define HPSA_RAID_51 4
638#define HPSA_RAID_6 5
639#define HPSA_RAID_ADM 6
640#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
641#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
642
643static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
644{
645 return !device->physical_device;
646}
647
648static ssize_t raid_level_show(struct device *dev,
649 struct device_attribute *attr, char *buf)
650{
651 ssize_t l = 0;
652 unsigned char rlevel;
653 struct ctlr_info *h;
654 struct scsi_device *sdev;
655 struct hpsa_scsi_dev_t *hdev;
656 unsigned long flags;
657
658 sdev = to_scsi_device(dev);
659 h = sdev_to_hba(sdev);
660 spin_lock_irqsave(&h->lock, flags);
661 hdev = sdev->hostdata;
662 if (!hdev) {
663 spin_unlock_irqrestore(&h->lock, flags);
664 return -ENODEV;
665 }
666
667
668 if (!is_logical_device(hdev)) {
669 spin_unlock_irqrestore(&h->lock, flags);
670 l = snprintf(buf, PAGE_SIZE, "N/A\n");
671 return l;
672 }
673
674 rlevel = hdev->raid_level;
675 spin_unlock_irqrestore(&h->lock, flags);
676 if (rlevel > RAID_UNKNOWN)
677 rlevel = RAID_UNKNOWN;
678 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
679 return l;
680}
681
682static ssize_t lunid_show(struct device *dev,
683 struct device_attribute *attr, char *buf)
684{
685 struct ctlr_info *h;
686 struct scsi_device *sdev;
687 struct hpsa_scsi_dev_t *hdev;
688 unsigned long flags;
689 unsigned char lunid[8];
690
691 sdev = to_scsi_device(dev);
692 h = sdev_to_hba(sdev);
693 spin_lock_irqsave(&h->lock, flags);
694 hdev = sdev->hostdata;
695 if (!hdev) {
696 spin_unlock_irqrestore(&h->lock, flags);
697 return -ENODEV;
698 }
699 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
700 spin_unlock_irqrestore(&h->lock, flags);
701 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
702 lunid[0], lunid[1], lunid[2], lunid[3],
703 lunid[4], lunid[5], lunid[6], lunid[7]);
704}
705
706static ssize_t unique_id_show(struct device *dev,
707 struct device_attribute *attr, char *buf)
708{
709 struct ctlr_info *h;
710 struct scsi_device *sdev;
711 struct hpsa_scsi_dev_t *hdev;
712 unsigned long flags;
713 unsigned char sn[16];
714
715 sdev = to_scsi_device(dev);
716 h = sdev_to_hba(sdev);
717 spin_lock_irqsave(&h->lock, flags);
718 hdev = sdev->hostdata;
719 if (!hdev) {
720 spin_unlock_irqrestore(&h->lock, flags);
721 return -ENODEV;
722 }
723 memcpy(sn, hdev->device_id, sizeof(sn));
724 spin_unlock_irqrestore(&h->lock, flags);
725 return snprintf(buf, 16 * 2 + 2,
726 "%02X%02X%02X%02X%02X%02X%02X%02X"
727 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
728 sn[0], sn[1], sn[2], sn[3],
729 sn[4], sn[5], sn[6], sn[7],
730 sn[8], sn[9], sn[10], sn[11],
731 sn[12], sn[13], sn[14], sn[15]);
732}
733
734static ssize_t sas_address_show(struct device *dev,
735 struct device_attribute *attr, char *buf)
736{
737 struct ctlr_info *h;
738 struct scsi_device *sdev;
739 struct hpsa_scsi_dev_t *hdev;
740 unsigned long flags;
741 u64 sas_address;
742
743 sdev = to_scsi_device(dev);
744 h = sdev_to_hba(sdev);
745 spin_lock_irqsave(&h->lock, flags);
746 hdev = sdev->hostdata;
747 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
748 spin_unlock_irqrestore(&h->lock, flags);
749 return -ENODEV;
750 }
751 sas_address = hdev->sas_address;
752 spin_unlock_irqrestore(&h->lock, flags);
753
754 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
755}
756
757static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
758 struct device_attribute *attr, char *buf)
759{
760 struct ctlr_info *h;
761 struct scsi_device *sdev;
762 struct hpsa_scsi_dev_t *hdev;
763 unsigned long flags;
764 int offload_enabled;
765
766 sdev = to_scsi_device(dev);
767 h = sdev_to_hba(sdev);
768 spin_lock_irqsave(&h->lock, flags);
769 hdev = sdev->hostdata;
770 if (!hdev) {
771 spin_unlock_irqrestore(&h->lock, flags);
772 return -ENODEV;
773 }
774 offload_enabled = hdev->offload_enabled;
775 spin_unlock_irqrestore(&h->lock, flags);
776 return snprintf(buf, 20, "%d\n", offload_enabled);
777}
778
779#define MAX_PATHS 8
780static ssize_t path_info_show(struct device *dev,
781 struct device_attribute *attr, char *buf)
782{
783 struct ctlr_info *h;
784 struct scsi_device *sdev;
785 struct hpsa_scsi_dev_t *hdev;
786 unsigned long flags;
787 int i;
788 int output_len = 0;
789 u8 box;
790 u8 bay;
791 u8 path_map_index = 0;
792 char *active;
793 unsigned char phys_connector[2];
794
795 sdev = to_scsi_device(dev);
796 h = sdev_to_hba(sdev);
797 spin_lock_irqsave(&h->devlock, flags);
798 hdev = sdev->hostdata;
799 if (!hdev) {
800 spin_unlock_irqrestore(&h->devlock, flags);
801 return -ENODEV;
802 }
803
804 bay = hdev->bay;
805 for (i = 0; i < MAX_PATHS; i++) {
806 path_map_index = 1<<i;
807 if (i == hdev->active_path_index)
808 active = "Active";
809 else if (hdev->path_map & path_map_index)
810 active = "Inactive";
811 else
812 continue;
813
814 output_len += scnprintf(buf + output_len,
815 PAGE_SIZE - output_len,
816 "[%d:%d:%d:%d] %20.20s ",
817 h->scsi_host->host_no,
818 hdev->bus, hdev->target, hdev->lun,
819 scsi_device_type(hdev->devtype));
820
821 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
822 output_len += scnprintf(buf + output_len,
823 PAGE_SIZE - output_len,
824 "%s\n", active);
825 continue;
826 }
827
828 box = hdev->box[i];
829 memcpy(&phys_connector, &hdev->phys_connector[i],
830 sizeof(phys_connector));
831 if (phys_connector[0] < '0')
832 phys_connector[0] = '0';
833 if (phys_connector[1] < '0')
834 phys_connector[1] = '0';
835 output_len += scnprintf(buf + output_len,
836 PAGE_SIZE - output_len,
837 "PORT: %.2s ",
838 phys_connector);
839 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
840 hdev->expose_device) {
841 if (box == 0 || box == 0xFF) {
842 output_len += scnprintf(buf + output_len,
843 PAGE_SIZE - output_len,
844 "BAY: %hhu %s\n",
845 bay, active);
846 } else {
847 output_len += scnprintf(buf + output_len,
848 PAGE_SIZE - output_len,
849 "BOX: %hhu BAY: %hhu %s\n",
850 box, bay, active);
851 }
852 } else if (box != 0 && box != 0xFF) {
853 output_len += scnprintf(buf + output_len,
854 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
855 box, active);
856 } else
857 output_len += scnprintf(buf + output_len,
858 PAGE_SIZE - output_len, "%s\n", active);
859 }
860
861 spin_unlock_irqrestore(&h->devlock, flags);
862 return output_len;
863}
864
865static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
866static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
867static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
868static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
869static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
870static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
871 host_show_hp_ssd_smart_path_enabled, NULL);
872static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
873static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
874 host_show_hp_ssd_smart_path_status,
875 host_store_hp_ssd_smart_path_status);
876static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
877 host_store_raid_offload_debug);
878static DEVICE_ATTR(firmware_revision, S_IRUGO,
879 host_show_firmware_revision, NULL);
880static DEVICE_ATTR(commands_outstanding, S_IRUGO,
881 host_show_commands_outstanding, NULL);
882static DEVICE_ATTR(transport_mode, S_IRUGO,
883 host_show_transport_mode, NULL);
884static DEVICE_ATTR(resettable, S_IRUGO,
885 host_show_resettable, NULL);
886static DEVICE_ATTR(lockup_detected, S_IRUGO,
887 host_show_lockup_detected, NULL);
888
889static struct device_attribute *hpsa_sdev_attrs[] = {
890 &dev_attr_raid_level,
891 &dev_attr_lunid,
892 &dev_attr_unique_id,
893 &dev_attr_hp_ssd_smart_path_enabled,
894 &dev_attr_path_info,
895 &dev_attr_sas_address,
896 NULL,
897};
898
899static struct device_attribute *hpsa_shost_attrs[] = {
900 &dev_attr_rescan,
901 &dev_attr_firmware_revision,
902 &dev_attr_commands_outstanding,
903 &dev_attr_transport_mode,
904 &dev_attr_resettable,
905 &dev_attr_hp_ssd_smart_path_status,
906 &dev_attr_raid_offload_debug,
907 &dev_attr_lockup_detected,
908 NULL,
909};
910
911#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
912 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
913
914static struct scsi_host_template hpsa_driver_template = {
915 .module = THIS_MODULE,
916 .name = HPSA,
917 .proc_name = HPSA,
918 .queuecommand = hpsa_scsi_queue_command,
919 .scan_start = hpsa_scan_start,
920 .scan_finished = hpsa_scan_finished,
921 .change_queue_depth = hpsa_change_queue_depth,
922 .this_id = -1,
923 .use_clustering = ENABLE_CLUSTERING,
924 .eh_abort_handler = hpsa_eh_abort_handler,
925 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
926 .ioctl = hpsa_ioctl,
927 .slave_alloc = hpsa_slave_alloc,
928 .slave_configure = hpsa_slave_configure,
929 .slave_destroy = hpsa_slave_destroy,
930#ifdef CONFIG_COMPAT
931 .compat_ioctl = hpsa_compat_ioctl,
932#endif
933 .sdev_attrs = hpsa_sdev_attrs,
934 .shost_attrs = hpsa_shost_attrs,
935 .max_sectors = 8192,
936 .no_write_same = 1,
937};
938
939static inline u32 next_command(struct ctlr_info *h, u8 q)
940{
941 u32 a;
942 struct reply_queue_buffer *rq = &h->reply_queue[q];
943
944 if (h->transMethod & CFGTBL_Trans_io_accel1)
945 return h->access.command_completed(h, q);
946
947 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
948 return h->access.command_completed(h, q);
949
950 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
951 a = rq->head[rq->current_entry];
952 rq->current_entry++;
953 atomic_dec(&h->commands_outstanding);
954 } else {
955 a = FIFO_EMPTY;
956 }
957
958 if (rq->current_entry == h->max_commands) {
959 rq->current_entry = 0;
960 rq->wraparound ^= 1;
961 }
962 return a;
963}
996#define DEFAULT_REPLY_QUEUE (-1)
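/*
 * set_performant_mode: tag the command for performant-mode submission.
 * Bit 0 of busaddr tells the controller to use performant mode, and the
 * bits above it select a block-fetch table entry based on the SG count.
 * Replies are steered to a reply queue: the current CPU's queue by
 * default, or an explicitly requested one.
 */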
997static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
998 int reply_queue)
999{
1000 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1001 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1002 if (unlikely(!h->msix_vector))
1003 return;
1004 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1005 c->Header.ReplyQueue =
1006 raw_smp_processor_id() % h->nreply_queues;
1007 else
1008 c->Header.ReplyQueue = reply_queue % h->nreply_queues;
1009 }
1010}
1011
1012static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1013 struct CommandList *c,
1014 int reply_queue)
1015{
1016 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
1022 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1023 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
1024 else
1025 cp->ReplyQueue = reply_queue % h->nreply_queues;
1026
	/*
	 * Set the bits in the address sent down to include the performant
	 * mode bit (bit 0), the block fetch table entry (shifted up one
	 * bit) and the ioaccel1 command type.
	 */
1032 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1033 IOACCEL1_BUSADDR_CMDTYPE;
1034}
1035
1036static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1037 struct CommandList *c,
1038 int reply_queue)
1039{
1040 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1041 &h->ioaccel2_cmd_pool[c->cmdindex];
1046 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1047 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1048 else
1049 cp->reply_queue = reply_queue % h->nreply_queues;
1055 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1056}
1057
1058static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1059 struct CommandList *c,
1060 int reply_queue)
1061{
1062 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1068 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1069 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1070 else
1071 cp->reply_queue = reply_queue % h->nreply_queues;
1078 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1079}
1080
1081static int is_firmware_flash_cmd(u8 *cdb)
1082{
1083 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1084}

/*
 * During firmware flash, the heartbeat register may not update as
 * frequently as it should, so relax the lockup-detection sampling
 * interval while a flash command is outstanding and restore it when
 * the flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1093static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1094 struct CommandList *c)
1095{
1096 if (!is_firmware_flash_cmd(c->Request.CDB))
1097 return;
1098 atomic_inc(&h->firmware_flash_in_progress);
1099 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1100}
1101
1102static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1103 struct CommandList *c)
1104{
1105 if (is_firmware_flash_cmd(c->Request.CDB) &&
1106 atomic_dec_and_test(&h->firmware_flash_in_progress))
1107 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1108}
1109
1110static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1111 struct CommandList *c, int reply_queue)
1112{
1113 dial_down_lockup_detection_during_fw_flash(h, c);
1114 atomic_inc(&h->commands_outstanding);
1115 switch (c->cmd_type) {
1116 case CMD_IOACCEL1:
1117 set_ioaccel1_performant_mode(h, c, reply_queue);
1118 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1119 break;
1120 case CMD_IOACCEL2:
1121 set_ioaccel2_performant_mode(h, c, reply_queue);
1122 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1123 break;
1124 case IOACCEL2_TMF:
1125 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1126 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1127 break;
1128 default:
1129 set_performant_mode(h, c, reply_queue);
1130 h->access.submit_command(h, c);
1131 }
1132}
1133
1134static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1135{
1136 if (unlikely(hpsa_is_pending_event(c)))
1137 return finish_cmd(c);
1138
1139 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1140}
1141
1142static inline int is_hba_lunid(unsigned char scsi3addr[])
1143{
1144 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1145}
1146
1147static inline int is_scsi_rev_5(struct ctlr_info *h)
1148{
1149 if (!h->hba_inquiry_data)
1150 return 0;
1151 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1152 return 1;
1153 return 0;
1154}
1155
1156static int hpsa_find_target_lun(struct ctlr_info *h,
1157 unsigned char scsi3addr[], int bus, int *target, int *lun)
1158{
	/*
	 * Finds an unused bus, target, lun for a new physical device.
	 * Assumes h->devlock is held.
	 */
1162 int i, found = 0;
1163 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1164
1165 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1166
1167 for (i = 0; i < h->ndevices; i++) {
1168 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1169 __set_bit(h->dev[i]->target, lun_taken);
1170 }
1171
1172 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1173 if (i < HPSA_MAX_DEVICES) {
1174
1175 *target = i;
1176 *lun = 0;
1177 found = 1;
1178 }
1179 return !found;
1180}
1181
1182static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1183 struct hpsa_scsi_dev_t *dev, char *description)
1184{
1185#define LABEL_SIZE 25
1186 char label[LABEL_SIZE];
1187
1188 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1189 return;
1190
1191 switch (dev->devtype) {
1192 case TYPE_RAID:
1193 snprintf(label, LABEL_SIZE, "controller");
1194 break;
1195 case TYPE_ENCLOSURE:
1196 snprintf(label, LABEL_SIZE, "enclosure");
1197 break;
1198 case TYPE_DISK:
1199 case TYPE_ZBC:
1200 if (dev->external)
1201 snprintf(label, LABEL_SIZE, "external");
1202 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1203 snprintf(label, LABEL_SIZE, "%s",
1204 raid_label[PHYSICAL_DRIVE]);
1205 else
1206 snprintf(label, LABEL_SIZE, "RAID-%s",
1207 dev->raid_level > RAID_UNKNOWN ? "?" :
1208 raid_label[dev->raid_level]);
1209 break;
1210 case TYPE_ROM:
1211 snprintf(label, LABEL_SIZE, "rom");
1212 break;
1213 case TYPE_TAPE:
1214 snprintf(label, LABEL_SIZE, "tape");
1215 break;
1216 case TYPE_MEDIUM_CHANGER:
1217 snprintf(label, LABEL_SIZE, "changer");
1218 break;
1219 default:
1220 snprintf(label, LABEL_SIZE, "UNKNOWN");
1221 break;
1222 }
1223
1224 dev_printk(level, &h->pdev->dev,
1225 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1226 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1227 description,
1228 scsi_device_type(dev->devtype),
1229 dev->vendor,
1230 dev->model,
1231 label,
1232 dev->offload_config ? '+' : '-',
1233 dev->offload_enabled ? '+' : '-',
1234 dev->expose_device);
1235}
1236
1237
1238static int hpsa_scsi_add_entry(struct ctlr_info *h,
1239 struct hpsa_scsi_dev_t *device,
1240 struct hpsa_scsi_dev_t *added[], int *nadded)
1241{
	/* Assumes h->devlock is held */
1243 int n = h->ndevices;
1244 int i;
1245 unsigned char addr1[8], addr2[8];
1246 struct hpsa_scsi_dev_t *sd;
1247
1248 if (n >= HPSA_MAX_DEVICES) {
1249 dev_err(&h->pdev->dev, "too many devices, some will be "
1250 "inaccessible.\n");
1251 return -1;
1252 }
1253
1254
1255 if (device->lun != -1)
1256
1257 goto lun_assigned;
1258
	/*
	 * If this device is a non-zero LUN of a multi-LUN device, byte 4
	 * of the 8-byte LUN address will contain the logical unit number.
	 */
1263 if (device->scsi3addr[4] == 0) {
1264
1265 if (hpsa_find_target_lun(h, device->scsi3addr,
1266 device->bus, &device->target, &device->lun) != 0)
1267 return -1;
1268 goto lun_assigned;
1269 }
	/*
	 * This is a non-zero LUN of a multi-LUN device.  Search our list
	 * for a device with the same 8-byte LUN address, ignoring bytes 4
	 * and 5, and inherit its bus and target.
	 */
1277 memcpy(addr1, device->scsi3addr, 8);
1278 addr1[4] = 0;
1279 addr1[5] = 0;
1280 for (i = 0; i < n; i++) {
1281 sd = h->dev[i];
1282 memcpy(addr2, sd->scsi3addr, 8);
1283 addr2[4] = 0;
1284 addr2[5] = 0;
1285
1286 if (memcmp(addr1, addr2, 8) == 0) {
1287 device->bus = sd->bus;
1288 device->target = sd->target;
1289 device->lun = device->scsi3addr[4];
1290 break;
1291 }
1292 }
1293 if (device->lun == -1) {
1294 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1295 " suspect firmware bug or unsupported hardware "
1296 "configuration.\n");
1297 return -1;
1298 }
1299
1300lun_assigned:
1301
1302 h->dev[n] = device;
1303 h->ndevices++;
1304 added[*nadded] = device;
1305 (*nadded)++;
1306 hpsa_show_dev_msg(KERN_INFO, h, device,
1307 device->expose_device ? "added" : "masked");
1308 device->offload_to_be_enabled = device->offload_enabled;
1309 device->offload_enabled = 0;
1310 return 0;
1311}
1312
1313
1314static void hpsa_scsi_update_entry(struct ctlr_info *h,
1315 int entry, struct hpsa_scsi_dev_t *new_entry)
1316{
1317 int offload_enabled;
1318
1319 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1320
1321
1322 h->dev[entry]->raid_level = new_entry->raid_level;
1323
1324
1325 if (new_entry->offload_config && new_entry->offload_enabled) {
1326
1327
1328
1329
1330
1331
1332
1333
1334 h->dev[entry]->raid_map = new_entry->raid_map;
1335 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1336 }
1337 if (new_entry->hba_ioaccel_enabled) {
1338 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1339 wmb();
1340 }
1341 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1342 h->dev[entry]->offload_config = new_entry->offload_config;
1343 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1344 h->dev[entry]->queue_depth = new_entry->queue_depth;
	/*
	 * Offload can be turned off immediately, but turning it on has to
	 * wait until the logical drive's phys_disk[] pointers have been
	 * updated, so only record the intent here.
	 */
1351 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1352 if (!new_entry->offload_enabled)
1353 h->dev[entry]->offload_enabled = 0;
1354
1355 offload_enabled = h->dev[entry]->offload_enabled;
1356 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1357 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1358 h->dev[entry]->offload_enabled = offload_enabled;
1359}
1360
1361
1362static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1363 int entry, struct hpsa_scsi_dev_t *new_entry,
1364 struct hpsa_scsi_dev_t *added[], int *nadded,
1365 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1366{
1367
1368 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1369 removed[*nremoved] = h->dev[entry];
1370 (*nremoved)++;
1371
1372
1373
1374
1375
1376 if (new_entry->target == -1) {
1377 new_entry->target = h->dev[entry]->target;
1378 new_entry->lun = h->dev[entry]->lun;
1379 }
1380
1381 h->dev[entry] = new_entry;
1382 added[*nadded] = new_entry;
1383 (*nadded)++;
1384 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1385 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1386 new_entry->offload_enabled = 0;
1387}
1388
1389
1390static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1391 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1392{
1393
1394 int i;
1395 struct hpsa_scsi_dev_t *sd;
1396
1397 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1398
1399 sd = h->dev[entry];
1400 removed[*nremoved] = h->dev[entry];
1401 (*nremoved)++;
1402
1403 for (i = entry; i < h->ndevices-1; i++)
1404 h->dev[i] = h->dev[i+1];
1405 h->ndevices--;
1406 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1407}
1408
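/* Byte-wise equality test for two 8-byte SCSI3 addresses */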
1409#define SCSI3ADDR_EQ(a, b) ( \
1410 (a)[7] == (b)[7] && \
1411 (a)[6] == (b)[6] && \
1412 (a)[5] == (b)[5] && \
1413 (a)[4] == (b)[4] && \
1414 (a)[3] == (b)[3] && \
1415 (a)[2] == (b)[2] && \
1416 (a)[1] == (b)[1] && \
1417 (a)[0] == (b)[0])
1418
1419static void fixup_botched_add(struct ctlr_info *h,
1420 struct hpsa_scsi_dev_t *added)
1421{
1422
1423
1424
1425 unsigned long flags;
1426 int i, j;
1427
1428 spin_lock_irqsave(&h->lock, flags);
1429 for (i = 0; i < h->ndevices; i++) {
1430 if (h->dev[i] == added) {
1431 for (j = i; j < h->ndevices-1; j++)
1432 h->dev[j] = h->dev[j+1];
1433 h->ndevices--;
1434 break;
1435 }
1436 }
1437 spin_unlock_irqrestore(&h->lock, flags);
1438 kfree(added);
1439}
1440
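/*
 * Two device-table entries describe the same device if the SCSI3 address,
 * device id, model, vendor, device type and bus all match.
 */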
1441static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1442 struct hpsa_scsi_dev_t *dev2)
1443{
1444
1445
1446
1447
1448 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1449 sizeof(dev1->scsi3addr)) != 0)
1450 return 0;
1451 if (memcmp(dev1->device_id, dev2->device_id,
1452 sizeof(dev1->device_id)) != 0)
1453 return 0;
1454 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1455 return 0;
1456 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1457 return 0;
1458 if (dev1->devtype != dev2->devtype)
1459 return 0;
1460 if (dev1->bus != dev2->bus)
1461 return 0;
1462 return 1;
1463}
1464
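/*
 * Device at the same address came back: report whether any attribute we
 * care about (RAID level, ioaccel offload settings, or, for physical
 * devices, queue depth) has changed.
 */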
1465static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1466 struct hpsa_scsi_dev_t *dev2)
1467{
1468
1469
1470
1471
1472 if (dev1->raid_level != dev2->raid_level)
1473 return 1;
1474 if (dev1->offload_config != dev2->offload_config)
1475 return 1;
1476 if (dev1->offload_enabled != dev2->offload_enabled)
1477 return 1;
1478 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1479 if (dev1->queue_depth != dev2->queue_depth)
1480 return 1;
1481 return 0;
1482}
/*
 * Look for "needle" in "haystack".  Return value:
 *   DEVICE_SAME: needle found at *index and it is an exact match.
 *   DEVICE_UPDATED: needle found at *index but a minor attribute
 *	(e.g. RAID level or offload setting) has changed.
 *   DEVICE_CHANGED: needle's address matches the entry at *index, but
 *	it must be treated as a different device.
 *   DEVICE_NOT_FOUND: needle not found in haystack.
 */
1492static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1493 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1494 int *index)
1495{
1496 int i;
1497#define DEVICE_NOT_FOUND 0
1498#define DEVICE_CHANGED 1
1499#define DEVICE_SAME 2
1500#define DEVICE_UPDATED 3
1501 if (needle == NULL)
1502 return DEVICE_NOT_FOUND;
1503
1504 for (i = 0; i < haystack_size; i++) {
1505 if (haystack[i] == NULL)
1506 continue;
1507 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1508 *index = i;
1509 if (device_is_the_same(needle, haystack[i])) {
1510 if (device_updated(needle, haystack[i]))
1511 return DEVICE_UPDATED;
1512 return DEVICE_SAME;
1513 } else {
1514
1515 if (needle->volume_offline)
1516 return DEVICE_NOT_FOUND;
1517 return DEVICE_CHANGED;
1518 }
1519 }
1520 }
1521 *index = -1;
1522 return DEVICE_NOT_FOUND;
1523}
1524
1525static void hpsa_monitor_offline_device(struct ctlr_info *h,
1526 unsigned char scsi3addr[])
1527{
1528 struct offline_device_entry *device;
1529 unsigned long flags;
1530
1531
1532 spin_lock_irqsave(&h->offline_device_lock, flags);
1533 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1534 if (memcmp(device->scsi3addr, scsi3addr,
1535 sizeof(device->scsi3addr)) == 0) {
1536 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1537 return;
1538 }
1539 }
1540 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1541
1542
1543 device = kmalloc(sizeof(*device), GFP_KERNEL);
1544 if (!device) {
1545 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1546 return;
1547 }
1548 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1549 spin_lock_irqsave(&h->offline_device_lock, flags);
1550 list_add_tail(&device->offline_list, &h->offline_device_list);
1551 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1552}
1553
1554
1555static void hpsa_show_volume_status(struct ctlr_info *h,
1556 struct hpsa_scsi_dev_t *sd)
1557{
1558 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1559 dev_info(&h->pdev->dev,
1560 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1561 h->scsi_host->host_no,
1562 sd->bus, sd->target, sd->lun);
1563 switch (sd->volume_offline) {
1564 case HPSA_LV_OK:
1565 break;
1566 case HPSA_LV_UNDERGOING_ERASE:
1567 dev_info(&h->pdev->dev,
1568 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1569 h->scsi_host->host_no,
1570 sd->bus, sd->target, sd->lun);
1571 break;
1572 case HPSA_LV_NOT_AVAILABLE:
1573 dev_info(&h->pdev->dev,
1574 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1575 h->scsi_host->host_no,
1576 sd->bus, sd->target, sd->lun);
1577 break;
1578 case HPSA_LV_UNDERGOING_RPI:
1579 dev_info(&h->pdev->dev,
1580 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1581 h->scsi_host->host_no,
1582 sd->bus, sd->target, sd->lun);
1583 break;
1584 case HPSA_LV_PENDING_RPI:
1585 dev_info(&h->pdev->dev,
1586 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1587 h->scsi_host->host_no,
1588 sd->bus, sd->target, sd->lun);
1589 break;
1590 case HPSA_LV_ENCRYPTED_NO_KEY:
1591 dev_info(&h->pdev->dev,
1592 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1593 h->scsi_host->host_no,
1594 sd->bus, sd->target, sd->lun);
1595 break;
1596 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1597 dev_info(&h->pdev->dev,
1598 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1599 h->scsi_host->host_no,
1600 sd->bus, sd->target, sd->lun);
1601 break;
1602 case HPSA_LV_UNDERGOING_ENCRYPTION:
1603 dev_info(&h->pdev->dev,
1604 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1605 h->scsi_host->host_no,
1606 sd->bus, sd->target, sd->lun);
1607 break;
1608 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1609 dev_info(&h->pdev->dev,
1610 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1611 h->scsi_host->host_no,
1612 sd->bus, sd->target, sd->lun);
1613 break;
1614 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1615 dev_info(&h->pdev->dev,
1616 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1617 h->scsi_host->host_no,
1618 sd->bus, sd->target, sd->lun);
1619 break;
1620 case HPSA_LV_PENDING_ENCRYPTION:
1621 dev_info(&h->pdev->dev,
1622 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1623 h->scsi_host->host_no,
1624 sd->bus, sd->target, sd->lun);
1625 break;
1626 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1627 dev_info(&h->pdev->dev,
1628 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1629 h->scsi_host->host_no,
1630 sd->bus, sd->target, sd->lun);
1631 break;
1632 }
1633}
1634
1635
1636
1637
1638
1639static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1640 struct hpsa_scsi_dev_t *dev[], int ndevices,
1641 struct hpsa_scsi_dev_t *logical_drive)
1642{
1643 struct raid_map_data *map = &logical_drive->raid_map;
1644 struct raid_map_disk_data *dd = &map->data[0];
1645 int i, j;
1646 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1647 le16_to_cpu(map->metadata_disks_per_row);
1648 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1649 le16_to_cpu(map->layout_map_count) *
1650 total_disks_per_row;
1651 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1652 total_disks_per_row;
1653 int qdepth;
1654
1655 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1656 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1657
1658 logical_drive->nphysical_disks = nraid_map_entries;
1659
1660 qdepth = 0;
1661 for (i = 0; i < nraid_map_entries; i++) {
1662 logical_drive->phys_disk[i] = NULL;
1663 if (!logical_drive->offload_config)
1664 continue;
1665 for (j = 0; j < ndevices; j++) {
1666 if (dev[j] == NULL)
1667 continue;
1668 if (dev[j]->devtype != TYPE_DISK &&
1669 dev[j]->devtype != TYPE_ZBC)
1670 continue;
1671 if (is_logical_device(dev[j]))
1672 continue;
1673 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1674 continue;
1675
1676 logical_drive->phys_disk[i] = dev[j];
1677 if (i < nphys_disk)
1678 qdepth = min(h->nr_cmds, qdepth +
1679 logical_drive->phys_disk[i]->queue_depth);
1680 break;
1681 }
1682
1683
1684
1685
1686
1687
1688
1689
1690 if (!logical_drive->phys_disk[i]) {
1691 logical_drive->offload_enabled = 0;
1692 logical_drive->offload_to_be_enabled = 0;
1693 logical_drive->queue_depth = 8;
1694 }
1695 }
1696 if (nraid_map_entries)
1697
1698
1699
1700
1701 logical_drive->queue_depth = qdepth;
1702 else
1703 logical_drive->queue_depth = h->nr_cmds;
1704}
1705
1706static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1707 struct hpsa_scsi_dev_t *dev[], int ndevices)
1708{
1709 int i;
1710
1711 for (i = 0; i < ndevices; i++) {
1712 if (dev[i] == NULL)
1713 continue;
1714 if (dev[i]->devtype != TYPE_DISK &&
1715 dev[i]->devtype != TYPE_ZBC)
1716 continue;
1717 if (!is_logical_device(dev[i]))
1718 continue;
1719
1720
1721
1722
1723
1724
1725
1726 if (dev[i]->offload_enabled)
1727 continue;
1728
1729 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1730 }
1731}
1732
1733static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1734{
1735 int rc = 0;
1736
1737 if (!h->scsi_host)
1738 return 1;
1739
1740 if (is_logical_device(device))
1741 rc = scsi_add_device(h->scsi_host, device->bus,
1742 device->target, device->lun);
1743 else
1744 rc = hpsa_add_sas_device(h->sas_host, device);
1745
1746 return rc;
1747}
1748
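/*
 * Count commands in the pool that are still active against this device.
 * Each slot's refcount is bumped while it is examined and dropped again
 * via cmd_free().
 */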
1749static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1750 struct hpsa_scsi_dev_t *dev)
1751{
1752 int i;
1753 int count = 0;
1754
1755 for (i = 0; i < h->nr_cmds; i++) {
1756 struct CommandList *c = h->cmd_pool + i;
1757 int refcount = atomic_inc_return(&c->refcount);
1758
1759 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1760 dev->scsi3addr)) {
1761 unsigned long flags;
1762
1763 spin_lock_irqsave(&h->lock, flags);
1764 if (!hpsa_is_cmd_idle(c))
1765 ++count;
1766 spin_unlock_irqrestore(&h->lock, flags);
1767 }
1768
1769 cmd_free(h, c);
1770 }
1771
1772 return count;
1773}
1774
1775static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1776 struct hpsa_scsi_dev_t *device)
1777{
1778 int cmds = 0;
1779 int waits = 0;
1780
1781 while (1) {
1782 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1783 if (cmds == 0)
1784 break;
1785 if (++waits > 20)
1786 break;
1787 dev_warn(&h->pdev->dev,
1788 "%s: removing device with %d outstanding commands!\n",
1789 __func__, cmds);
1790 msleep(1000);
1791 }
1792}
1793
1794static void hpsa_remove_device(struct ctlr_info *h,
1795 struct hpsa_scsi_dev_t *device)
1796{
1797 struct scsi_device *sdev = NULL;
1798
1799 if (!h->scsi_host)
1800 return;
1801
1802 if (is_logical_device(device)) {
1803 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1804 device->target, device->lun);
1805 if (sdev) {
1806 scsi_remove_device(sdev);
1807 scsi_device_put(sdev);
1808 } else {
1809
1810
1811
1812
1813
1814 hpsa_show_dev_msg(KERN_WARNING, h, device,
1815 "didn't find device for removal.");
1816 }
1817 } else {
1818
1819 device->removed = 1;
1820 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1821
1822 hpsa_remove_sas_device(device);
1823 }
1824}
1825
1826static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1827 struct hpsa_scsi_dev_t *sd[], int nsds)
1828{
	/*
	 * sd[] contains the controller's current view of the devices;
	 * h->dev[] is the driver's current table.  Reconcile the two:
	 * remove devices that went away, update changed ones, add new ones.
	 */
1833 int i, entry, device_change, changes = 0;
1834 struct hpsa_scsi_dev_t *csd;
1835 unsigned long flags;
1836 struct hpsa_scsi_dev_t **added, **removed;
1837 int nadded, nremoved;
	/*
	 * Don't rebuild the device table while a reset is in progress;
	 * just note that a rescan is needed so it can be redone later.
	 */
1843 if (h->reset_in_progress) {
1844 h->drv_req_rescan = 1;
1845 return;
1846 }
1847
1848 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1849 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1850
1851 if (!added || !removed) {
1852 dev_warn(&h->pdev->dev, "out of memory in "
1853 "adjust_hpsa_scsi_table\n");
1854 goto free_and_out;
1855 }
1856
1857 spin_lock_irqsave(&h->devlock, flags);
	/*
	 * First pass: walk the existing h->dev[] list and, for each entry,
	 * remove it, replace it, or update it according to what the
	 * controller reported in sd[].
	 */
1866 i = 0;
1867 nremoved = 0;
1868 nadded = 0;
1869 while (i < h->ndevices) {
1870 csd = h->dev[i];
1871 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1872 if (device_change == DEVICE_NOT_FOUND) {
1873 changes++;
1874 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1875 continue;
1876 } else if (device_change == DEVICE_CHANGED) {
1877 changes++;
1878 hpsa_scsi_replace_entry(h, i, sd[entry],
1879 added, &nadded, removed, &nremoved);
1880
1881
1882
1883 sd[entry] = NULL;
1884 } else if (device_change == DEVICE_UPDATED) {
1885 hpsa_scsi_update_entry(h, i, sd[entry]);
1886 }
1887 i++;
1888 }
	/*
	 * Second pass: add any devices reported in sd[] that are not
	 * already present in h->dev[].
	 */
1894 for (i = 0; i < nsds; i++) {
1895 if (!sd[i])
1896 continue;
1897
1898
1899
1900
1901
1902
1903 if (sd[i]->volume_offline) {
1904 hpsa_show_volume_status(h, sd[i]);
1905 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1906 continue;
1907 }
1908
1909 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1910 h->ndevices, &entry);
1911 if (device_change == DEVICE_NOT_FOUND) {
1912 changes++;
1913 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1914 break;
1915 sd[i] = NULL;
1916 } else if (device_change == DEVICE_CHANGED) {
1917
1918 changes++;
1919 dev_warn(&h->pdev->dev,
1920 "device unexpectedly changed.\n");
1921
1922 }
1923 }
1924 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
	/*
	 * Now that the phys_disk[] pointers are up to date, it is safe to
	 * apply the deferred offload_to_be_enabled settings.
	 */
1929 for (i = 0; i < h->ndevices; i++) {
1930 if (h->dev[i] == NULL)
1931 continue;
1932 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1933 }
1934
1935 spin_unlock_irqrestore(&h->devlock, flags);
1936
1937
1938
1939
1940
1941 for (i = 0; i < nsds; i++) {
1942 if (!sd[i])
1943 continue;
1944 if (sd[i]->volume_offline)
1945 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1946 }
1947
1948
1949
1950
1951
1952 if (!changes)
1953 goto free_and_out;
1954
1955
1956 for (i = 0; i < nremoved; i++) {
1957 if (removed[i] == NULL)
1958 continue;
1959 if (removed[i]->expose_device)
1960 hpsa_remove_device(h, removed[i]);
1961 kfree(removed[i]);
1962 removed[i] = NULL;
1963 }
1964
1965
1966 for (i = 0; i < nadded; i++) {
1967 int rc = 0;
1968
1969 if (added[i] == NULL)
1970 continue;
1971 if (!(added[i]->expose_device))
1972 continue;
1973 rc = hpsa_add_device(h, added[i]);
1974 if (!rc)
1975 continue;
1976 dev_warn(&h->pdev->dev,
1977 "addition failed %d, device not added.", rc);
1978
1979
1980
1981 fixup_botched_add(h, added[i]);
1982 h->drv_req_rescan = 1;
1983 }
1984
1985free_and_out:
1986 kfree(added);
1987 kfree(removed);
1988}
1989
1990
1991
1992
1993
1994static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1995 int bus, int target, int lun)
1996{
1997 int i;
1998 struct hpsa_scsi_dev_t *sd;
1999
2000 for (i = 0; i < h->ndevices; i++) {
2001 sd = h->dev[i];
2002 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2003 return sd;
2004 }
2005 return NULL;
2006}
2007
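/*
 * Called by the SCSI midlayer when a new scsi_device is created; attach
 * the matching hpsa_scsi_dev_t (found via the SAS rphy for physical
 * devices, or by bus/target/lun for logical ones) as sdev->hostdata.
 */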
2008static int hpsa_slave_alloc(struct scsi_device *sdev)
2009{
2010 struct hpsa_scsi_dev_t *sd;
2011 unsigned long flags;
2012 struct ctlr_info *h;
2013
2014 h = sdev_to_hba(sdev);
2015 spin_lock_irqsave(&h->devlock, flags);
2016 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2017 struct scsi_target *starget;
2018 struct sas_rphy *rphy;
2019
2020 starget = scsi_target(sdev);
2021 rphy = target_to_rphy(starget);
2022 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2023 if (sd) {
2024 sd->target = sdev_id(sdev);
2025 sd->lun = sdev->lun;
2026 }
2027 } else
2028 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2029 sdev_id(sdev), sdev->lun);
2030
2031 if (sd && sd->expose_device) {
2032 atomic_set(&sd->ioaccel_cmds_out, 0);
2033 sdev->hostdata = sd;
2034 } else
2035 sdev->hostdata = NULL;
2036 spin_unlock_irqrestore(&h->devlock, flags);
2037 return 0;
2038}
2039
2040
2041static int hpsa_slave_configure(struct scsi_device *sdev)
2042{
2043 struct hpsa_scsi_dev_t *sd;
2044 int queue_depth;
2045
2046 sd = sdev->hostdata;
2047 sdev->no_uld_attach = !sd || !sd->expose_device;
2048
2049 if (sd)
2050 queue_depth = sd->queue_depth != 0 ?
2051 sd->queue_depth : sdev->host->can_queue;
2052 else
2053 queue_depth = sdev->host->can_queue;
2054
2055 scsi_change_queue_depth(sdev, queue_depth);
2056
2057 return 0;
2058}
2059
2060static void hpsa_slave_destroy(struct scsi_device *sdev)
2061{
2062
2063}
2064
2065static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2066{
2067 int i;
2068
2069 if (!h->ioaccel2_cmd_sg_list)
2070 return;
2071 for (i = 0; i < h->nr_cmds; i++) {
2072 kfree(h->ioaccel2_cmd_sg_list[i]);
2073 h->ioaccel2_cmd_sg_list[i] = NULL;
2074 }
2075 kfree(h->ioaccel2_cmd_sg_list);
2076 h->ioaccel2_cmd_sg_list = NULL;
2077}
2078
2079static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2080{
2081 int i;
2082
2083 if (h->chainsize <= 0)
2084 return 0;
2085
2086 h->ioaccel2_cmd_sg_list =
2087 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2088 GFP_KERNEL);
2089 if (!h->ioaccel2_cmd_sg_list)
2090 return -ENOMEM;
2091 for (i = 0; i < h->nr_cmds; i++) {
2092 h->ioaccel2_cmd_sg_list[i] =
2093 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2094 h->maxsgentries, GFP_KERNEL);
2095 if (!h->ioaccel2_cmd_sg_list[i])
2096 goto clean;
2097 }
2098 return 0;
2099
2100clean:
2101 hpsa_free_ioaccel2_sg_chain_blocks(h);
2102 return -ENOMEM;
2103}
2104
2105static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2106{
2107 int i;
2108
2109 if (!h->cmd_sg_list)
2110 return;
2111 for (i = 0; i < h->nr_cmds; i++) {
2112 kfree(h->cmd_sg_list[i]);
2113 h->cmd_sg_list[i] = NULL;
2114 }
2115 kfree(h->cmd_sg_list);
2116 h->cmd_sg_list = NULL;
2117}
2118
2119static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2120{
2121 int i;
2122
2123 if (h->chainsize <= 0)
2124 return 0;
2125
2126 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2127 GFP_KERNEL);
2128 if (!h->cmd_sg_list) {
2129 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2130 return -ENOMEM;
2131 }
2132 for (i = 0; i < h->nr_cmds; i++) {
2133 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2134 h->chainsize, GFP_KERNEL);
2135 if (!h->cmd_sg_list[i]) {
2136 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2137 goto clean;
2138 }
2139 }
2140 return 0;
2141
2142clean:
2143 hpsa_free_sg_chain_blocks(h);
2144 return -ENOMEM;
2145}
2146
2147static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2148 struct io_accel2_cmd *cp, struct CommandList *c)
2149{
2150 struct ioaccel2_sg_element *chain_block;
2151 u64 temp64;
2152 u32 chain_size;
2153
2154 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2155 chain_size = le32_to_cpu(cp->sg[0].length);
2156 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2157 PCI_DMA_TODEVICE);
2158 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2159
2160 cp->sg->address = 0;
2161 return -1;
2162 }
2163 cp->sg->address = cpu_to_le64(temp64);
2164 return 0;
2165}
2166
2167static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2168 struct io_accel2_cmd *cp)
2169{
2170 struct ioaccel2_sg_element *chain_sg;
2171 u64 temp64;
2172 u32 chain_size;
2173
2174 chain_sg = cp->sg;
2175 temp64 = le64_to_cpu(chain_sg->address);
2176 chain_size = le32_to_cpu(cp->sg[0].length);
2177 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2178}
2179
2180static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2181 struct CommandList *c)
2182{
2183 struct SGDescriptor *chain_sg, *chain_block;
2184 u64 temp64;
2185 u32 chain_len;
2186
2187 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2188 chain_block = h->cmd_sg_list[c->cmdindex];
2189 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2190 chain_len = sizeof(*chain_sg) *
2191 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2192 chain_sg->Len = cpu_to_le32(chain_len);
2193 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2194 PCI_DMA_TODEVICE);
2195 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2196
2197 chain_sg->Addr = cpu_to_le64(0);
2198 return -1;
2199 }
2200 chain_sg->Addr = cpu_to_le64(temp64);
2201 return 0;
2202}
2203
2204static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2205 struct CommandList *c)
2206{
2207 struct SGDescriptor *chain_sg;
2208
2209 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2210 return;
2211
2212 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2213 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2214 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2215}
2216
2217
2218
2219
2220
2221
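/*
 * Decode the error information of a completed ioaccel2 (HP SSD Smart
 * Path) command.  Returns nonzero if the command should be retried,
 * normally down the regular RAID path.
 */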
2222static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2223 struct CommandList *c,
2224 struct scsi_cmnd *cmd,
2225 struct io_accel2_cmd *c2,
2226 struct hpsa_scsi_dev_t *dev)
2227{
2228 int data_len;
2229 int retry = 0;
2230 u32 ioaccel2_resid = 0;
2231
2232 switch (c2->error_data.serv_response) {
2233 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2234 switch (c2->error_data.status) {
2235 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2236 break;
2237 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2238 cmd->result |= SAM_STAT_CHECK_CONDITION;
2239 if (c2->error_data.data_present !=
2240 IOACCEL2_SENSE_DATA_PRESENT) {
2241 memset(cmd->sense_buffer, 0,
2242 SCSI_SENSE_BUFFERSIZE);
2243 break;
2244 }
2245
2246 data_len = c2->error_data.sense_data_len;
2247 if (data_len > SCSI_SENSE_BUFFERSIZE)
2248 data_len = SCSI_SENSE_BUFFERSIZE;
2249 if (data_len > sizeof(c2->error_data.sense_data_buff))
2250 data_len =
2251 sizeof(c2->error_data.sense_data_buff);
2252 memcpy(cmd->sense_buffer,
2253 c2->error_data.sense_data_buff, data_len);
2254 retry = 1;
2255 break;
2256 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2257 retry = 1;
2258 break;
2259 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2260 retry = 1;
2261 break;
2262 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2263 retry = 1;
2264 break;
2265 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2266 retry = 1;
2267 break;
2268 default:
2269 retry = 1;
2270 break;
2271 }
2272 break;
2273 case IOACCEL2_SERV_RESPONSE_FAILURE:
2274 switch (c2->error_data.status) {
2275 case IOACCEL2_STATUS_SR_IO_ERROR:
2276 case IOACCEL2_STATUS_SR_IO_ABORTED:
2277 case IOACCEL2_STATUS_SR_OVERRUN:
2278 retry = 1;
2279 break;
2280 case IOACCEL2_STATUS_SR_UNDERRUN:
2281 cmd->result = (DID_OK << 16);
2282 cmd->result |= (COMMAND_COMPLETE << 8);
2283 ioaccel2_resid = get_unaligned_le32(
2284 &c2->error_data.resid_cnt[0]);
2285 scsi_set_resid(cmd, ioaccel2_resid);
2286 break;
2287 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2288 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2289 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2290
2291
2292
2293
2294
2295
2296
2297
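/*
 * If an exposed physical (HBA) device is no longer reachable, fail
 * the command with DID_NO_CONNECT, mark the device removed and ask
 * for a rescan; otherwise fall through to a retry below.
 */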
2298 if (dev->physical_device && dev->expose_device) {
2299 cmd->result = DID_NO_CONNECT << 16;
2300 dev->removed = 1;
2301 h->drv_req_rescan = 1;
2302 dev_warn(&h->pdev->dev,
2303 "%s: device is gone!\n", __func__);
2304 } else
2305
2306
2307
2308
2309
2310 retry = 1;
2311 break;
2312 default:
2313 retry = 1;
2314 }
2315 break;
2316 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2317 break;
2318 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2319 break;
2320 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2321 retry = 1;
2322 break;
2323 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2324 break;
2325 default:
2326 retry = 1;
2327 break;
2328 }
2329
2330 return retry;
2331}
2332
2333static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2334 struct CommandList *c)
2335{
2336 bool do_wake = false;
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
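/*
 * Mark the command idle, then resolve any abort or reset waiting on
 * it.  The event sync wait queue is woken once the last outstanding
 * command tied to the pending event has completed.
 */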
2353 c->scsi_cmd = SCSI_CMD_IDLE;
2354 mb();
2355 if (c->abort_pending) {
2356 do_wake = true;
2357 c->abort_pending = false;
2358 }
2359 if (c->reset_pending) {
2360 unsigned long flags;
2361 struct hpsa_scsi_dev_t *dev;
2362
2363
2364
2365
2366
2367
2368 spin_lock_irqsave(&h->lock, flags);
2369 dev = c->reset_pending;
2370 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2371 do_wake = true;
2372 c->reset_pending = NULL;
2373 spin_unlock_irqrestore(&h->lock, flags);
2374 }
2375
2376 if (do_wake)
2377 wake_up_all(&h->event_sync_wait_queue);
2378}
2379
2380static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2381 struct CommandList *c)
2382{
2383 hpsa_cmd_resolve_events(h, c);
2384 cmd_tagged_free(h, c);
2385}
2386
2387static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2388 struct CommandList *c, struct scsi_cmnd *cmd)
2389{
2390 hpsa_cmd_resolve_and_free(h, c);
2391 cmd->scsi_done(cmd);
2392}
2393
2394static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2395{
2396 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2397 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2398}
2399
2400static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2401{
2402 cmd->result = DID_ABORT << 16;
2403}
2404
2405static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2406 struct scsi_cmnd *cmd)
2407{
2408 hpsa_set_scsi_cmd_aborted(cmd);
2409 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2410 c->Request.CDB, c->err_info->ScsiStatus);
2411 hpsa_cmd_resolve_and_free(h, c);
2412}
2413
2414static void process_ioaccel2_completion(struct ctlr_info *h,
2415 struct CommandList *c, struct scsi_cmnd *cmd,
2416 struct hpsa_scsi_dev_t *dev)
2417{
2418 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2419
2420
2421 if (likely(c2->error_data.serv_response == 0 &&
2422 c2->error_data.status == 0))
2423 return hpsa_cmd_free_and_done(h, c, cmd);
2424
2425
2426
2427
2428
2429
2430 if (is_logical_device(dev) &&
2431 c2->error_data.serv_response ==
2432 IOACCEL2_SERV_RESPONSE_FAILURE) {
2433 if (c2->error_data.status ==
2434 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2435 dev->offload_enabled = 0;
2436 dev->offload_to_be_enabled = 0;
2437 }
2438
2439 return hpsa_retry_cmd(h, c);
2440 }
2441
2442 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2443 return hpsa_retry_cmd(h, c);
2444
2445 return hpsa_cmd_free_and_done(h, c, cmd);
2446}
2447
2448
2449static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2450 struct CommandList *cp)
2451{
2452 u8 tmf_status = cp->err_info->ScsiStatus;
2453
2454 switch (tmf_status) {
2455 case CISS_TMF_COMPLETE:
2456
2457
2458
2459
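/*
 * CISS_TMF_COMPLETE deliberately falls through and is treated the
 * same as CISS_TMF_SUCCESS.
 */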
2460 case CISS_TMF_SUCCESS:
2461 return 0;
2462 case CISS_TMF_INVALID_FRAME:
2463 case CISS_TMF_NOT_SUPPORTED:
2464 case CISS_TMF_FAILED:
2465 case CISS_TMF_WRONG_LUN:
2466 case CISS_TMF_OVERLAPPED_TAG:
2467 break;
2468 default:
2469 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2470 tmf_status);
2471 break;
2472 }
2473 return -tmf_status;
2474}
2475
2476static void complete_scsi_command(struct CommandList *cp)
2477{
2478 struct scsi_cmnd *cmd;
2479 struct ctlr_info *h;
2480 struct ErrorInfo *ei;
2481 struct hpsa_scsi_dev_t *dev;
2482 struct io_accel2_cmd *c2;
2483
2484 u8 sense_key;
2485 u8 asc;
2486 u8 ascq;
2487 unsigned long sense_data_size;
2488
2489 ei = cp->err_info;
2490 cmd = cp->scsi_cmd;
2491 h = cp->h;
2492 dev = cmd->device->hostdata;
2493 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2494
2495 scsi_dma_unmap(cmd);
2496 if ((cp->cmd_type == CMD_SCSI) &&
2497 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2498 hpsa_unmap_sg_chain_block(h, cp);
2499
2500 if ((cp->cmd_type == CMD_IOACCEL2) &&
2501 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2502 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2503
2504 cmd->result = (DID_OK << 16);
2505 cmd->result |= (COMMAND_COMPLETE << 8);
2506
2507 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2508 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2509
2510
2511
2512
2513
2514
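/*
 * If the controller lockup detector has fired, fail the command with
 * DID_NO_CONNECT instead of completing it normally.
 */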
2515 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2516
2517 cmd->result = DID_NO_CONNECT << 16;
2518 return hpsa_cmd_free_and_done(h, cp, cmd);
2519 }
2520
2521 if ((unlikely(hpsa_is_pending_event(cp)))) {
2522 if (cp->reset_pending)
2523 return hpsa_cmd_resolve_and_free(h, cp);
2524 if (cp->abort_pending)
2525 return hpsa_cmd_abort_and_free(h, cp, cmd);
2526 }
2527
2528 if (cp->cmd_type == CMD_IOACCEL2)
2529 return process_ioaccel2_completion(h, cp, cmd, dev);
2530
2531 scsi_set_resid(cmd, ei->ResidualCnt);
2532 if (ei->CommandStatus == 0)
2533 return hpsa_cmd_free_and_done(h, cp, cmd);
2534
2535
2536
2537
2538 if (cp->cmd_type == CMD_IOACCEL1) {
2539 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2540 cp->Header.SGList = scsi_sg_count(cmd);
2541 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2542 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2543 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2544 cp->Header.tag = c->tag;
2545 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2546 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2547
2548
2549
2550
2551
2552 if (is_logical_device(dev)) {
2553 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2554 dev->offload_enabled = 0;
2555 return hpsa_retry_cmd(h, cp);
2556 }
2557 }
2558
2559
2560 switch (ei->CommandStatus) {
2561
2562 case CMD_TARGET_STATUS:
2563 cmd->result |= ei->ScsiStatus;
2564
2565 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2566 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2567 else
2568 sense_data_size = sizeof(ei->SenseInfo);
2569 if (ei->SenseLen < sense_data_size)
2570 sense_data_size = ei->SenseLen;
2571 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2572 if (ei->ScsiStatus)
2573 decode_sense_data(ei->SenseInfo, sense_data_size,
2574 &sense_key, &asc, &ascq);
2575 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2576 if (sense_key == ABORTED_COMMAND) {
2577 cmd->result |= DID_SOFT_ERROR << 16;
2578 break;
2579 }
2580 break;
2581 }
2582
2583
2584
2585 if (ei->ScsiStatus) {
2586 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2587 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2588 "Returning result: 0x%x\n",
2589 cp, ei->ScsiStatus,
2590 sense_key, asc, ascq,
2591 cmd->result);
2592 } else {
2593 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2594 "Returning no connection.\n", cp),
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608 cmd->result = DID_NO_CONNECT << 16;
2609 }
2610 break;
2611
2612 case CMD_DATA_UNDERRUN:
2613 break;
2614 case CMD_DATA_OVERRUN:
2615 dev_warn(&h->pdev->dev,
2616 "CDB %16phN data overrun\n", cp->Request.CDB);
2617 break;
2618 case CMD_INVALID: {
2619
2620
2621
2622
2623
2624
2625
2626
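/*
 * CMD_INVALID usually indicates the addressed device is no longer
 * present, so treat it as a no-connect rather than an error.
 */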
2627 cmd->result = DID_NO_CONNECT << 16;
2628 }
2629 break;
2630 case CMD_PROTOCOL_ERR:
2631 cmd->result = DID_ERROR << 16;
2632 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2633 cp->Request.CDB);
2634 break;
2635 case CMD_HARDWARE_ERR:
2636 cmd->result = DID_ERROR << 16;
2637 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2638 cp->Request.CDB);
2639 break;
2640 case CMD_CONNECTION_LOST:
2641 cmd->result = DID_ERROR << 16;
2642 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2643 cp->Request.CDB);
2644 break;
2645 case CMD_ABORTED:
2646
2647 return hpsa_cmd_abort_and_free(h, cp, cmd);
2648 case CMD_ABORT_FAILED:
2649 cmd->result = DID_ERROR << 16;
2650 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2651 cp->Request.CDB);
2652 break;
2653 case CMD_UNSOLICITED_ABORT:
2654 cmd->result = DID_SOFT_ERROR << 16;
2655 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2656 cp->Request.CDB);
2657 break;
2658 case CMD_TIMEOUT:
2659 cmd->result = DID_TIME_OUT << 16;
2660 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2661 cp->Request.CDB);
2662 break;
2663 case CMD_UNABORTABLE:
2664 cmd->result = DID_ERROR << 16;
2665 dev_warn(&h->pdev->dev, "Command unabortable\n");
2666 break;
2667 case CMD_TMF_STATUS:
2668 if (hpsa_evaluate_tmf_status(h, cp))
2669 cmd->result = DID_ERROR << 16;
2670 break;
2671 case CMD_IOACCEL_DISABLED:
2672
2673
2674
2675 cmd->result = DID_SOFT_ERROR << 16;
2676 dev_warn(&h->pdev->dev,
2677 "cp %p had HP SSD Smart Path error\n", cp);
2678 break;
2679 default:
2680 cmd->result = DID_ERROR << 16;
2681 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2682 cp, ei->CommandStatus);
2683 }
2684
2685 return hpsa_cmd_free_and_done(h, cp, cmd);
2686}
2687
2688static void hpsa_pci_unmap(struct pci_dev *pdev,
2689 struct CommandList *c, int sg_used, int data_direction)
2690{
2691 int i;
2692
2693 for (i = 0; i < sg_used; i++)
2694 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2695 le32_to_cpu(c->SG[i].Len),
2696 data_direction);
2697}
2698
2699static int hpsa_map_one(struct pci_dev *pdev,
2700 struct CommandList *cp,
2701 unsigned char *buf,
2702 size_t buflen,
2703 int data_direction)
2704{
2705 u64 addr64;
2706
2707 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2708 cp->Header.SGList = 0;
2709 cp->Header.SGTotal = cpu_to_le16(0);
2710 return 0;
2711 }
2712
2713 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2714 if (dma_mapping_error(&pdev->dev, addr64)) {
2715
2716 cp->Header.SGList = 0;
2717 cp->Header.SGTotal = cpu_to_le16(0);
2718 return -1;
2719 }
2720 cp->SG[0].Addr = cpu_to_le64(addr64);
2721 cp->SG[0].Len = cpu_to_le32(buflen);
2722 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
2723 cp->Header.SGList = 1;
2724 cp->Header.SGTotal = cpu_to_le16(1);
2725 return 0;
2726}
2727
2728#define NO_TIMEOUT ((unsigned long) -1)
2729#define DEFAULT_TIMEOUT 30000
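/*
 * Submit a command and wait for its completion, either indefinitely
 * (NO_TIMEOUT) or for at most timeout_msecs; returns -ETIMEDOUT if
 * the wait expires before the command completes.
 */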
2730static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2731 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2732{
2733 DECLARE_COMPLETION_ONSTACK(wait);
2734
2735 c->waiting = &wait;
2736 __enqueue_cmd_and_start_io(h, c, reply_queue);
2737 if (timeout_msecs == NO_TIMEOUT) {
2738
2739 wait_for_completion_io(&wait);
2740 return IO_OK;
2741 }
2742 if (!wait_for_completion_io_timeout(&wait,
2743 msecs_to_jiffies(timeout_msecs))) {
2744 dev_warn(&h->pdev->dev, "Command timed out.\n");
2745 return -ETIMEDOUT;
2746 }
2747 return IO_OK;
2748}
2749
2750static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2751 int reply_queue, unsigned long timeout_msecs)
2752{
2753 if (unlikely(lockup_detected(h))) {
2754 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2755 return IO_OK;
2756 }
2757 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2758}
2759
2760static u32 lockup_detected(struct ctlr_info *h)
2761{
2762 int cpu;
2763 u32 rc, *lockup_detected;
2764
2765 cpu = get_cpu();
2766 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2767 rc = *lockup_detected;
2768 put_cpu();
2769 return rc;
2770}
2771
2772#define MAX_DRIVER_CMD_RETRIES 25
2773static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2774 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2775{
2776 int backoff_time = 10, retry_count = 0;
2777 int rc;
2778
2779 do {
2780 memset(c->err_info, 0, sizeof(*c->err_info));
2781 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2782 timeout_msecs);
2783 if (rc)
2784 break;
2785 retry_count++;
2786 if (retry_count > 3) {
2787 msleep(backoff_time);
2788 if (backoff_time < 1000)
2789 backoff_time *= 2;
2790 }
2791 } while ((check_for_unit_attention(h, c) ||
2792 check_for_busy(h, c)) &&
2793 retry_count <= MAX_DRIVER_CMD_RETRIES);
2794 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2795 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2796 rc = -EIO;
2797 return rc;
2798}
2799
2800static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2801 struct CommandList *c)
2802{
2803 const u8 *cdb = c->Request.CDB;
2804 const u8 *lun = c->Header.LUN.LunAddrBytes;
2805
2806 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2807 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2808 txt, lun[0], lun[1], lun[2], lun[3],
2809 lun[4], lun[5], lun[6], lun[7],
2810 cdb[0], cdb[1], cdb[2], cdb[3],
2811 cdb[4], cdb[5], cdb[6], cdb[7],
2812 cdb[8], cdb[9], cdb[10], cdb[11],
2813 cdb[12], cdb[13], cdb[14], cdb[15]);
2814}
2815
2816static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2817 struct CommandList *cp)
2818{
2819 const struct ErrorInfo *ei = cp->err_info;
2820 struct device *d = &cp->h->pdev->dev;
2821 u8 sense_key, asc, ascq;
2822 int sense_len;
2823
2824 switch (ei->CommandStatus) {
2825 case CMD_TARGET_STATUS:
2826 if (ei->SenseLen > sizeof(ei->SenseInfo))
2827 sense_len = sizeof(ei->SenseInfo);
2828 else
2829 sense_len = ei->SenseLen;
2830 decode_sense_data(ei->SenseInfo, sense_len,
2831 &sense_key, &asc, &ascq);
2832 hpsa_print_cmd(h, "SCSI status", cp);
2833 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2834 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2835 sense_key, asc, ascq);
2836 else
2837 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2838 if (ei->ScsiStatus == 0)
2839 dev_warn(d, "SCSI status is abnormally zero. "
2840 "(probably indicates selection timeout "
2841 "reported incorrectly due to a known "
2842 "firmware bug, circa July, 2001.)\n");
2843 break;
2844 case CMD_DATA_UNDERRUN:
2845 break;
2846 case CMD_DATA_OVERRUN:
2847 hpsa_print_cmd(h, "overrun condition", cp);
2848 break;
2849 case CMD_INVALID: {
2850
2851
2852
2853 hpsa_print_cmd(h, "invalid command", cp);
2854 dev_warn(d, "probably means device no longer present\n");
2855 }
2856 break;
2857 case CMD_PROTOCOL_ERR:
2858 hpsa_print_cmd(h, "protocol error", cp);
2859 break;
2860 case CMD_HARDWARE_ERR:
2861 hpsa_print_cmd(h, "hardware error", cp);
2862 break;
2863 case CMD_CONNECTION_LOST:
2864 hpsa_print_cmd(h, "connection lost", cp);
2865 break;
2866 case CMD_ABORTED:
2867 hpsa_print_cmd(h, "aborted", cp);
2868 break;
2869 case CMD_ABORT_FAILED:
2870 hpsa_print_cmd(h, "abort failed", cp);
2871 break;
2872 case CMD_UNSOLICITED_ABORT:
2873 hpsa_print_cmd(h, "unsolicited abort", cp);
2874 break;
2875 case CMD_TIMEOUT:
2876 hpsa_print_cmd(h, "timed out", cp);
2877 break;
2878 case CMD_UNABORTABLE:
2879 hpsa_print_cmd(h, "unabortable", cp);
2880 break;
2881 case CMD_CTLR_LOCKUP:
2882 hpsa_print_cmd(h, "controller lockup detected", cp);
2883 break;
2884 default:
2885 hpsa_print_cmd(h, "unknown status", cp);
2886 dev_warn(d, "Unknown command status %x\n",
2887 ei->CommandStatus);
2888 }
2889}
2890
2891static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2892 u16 page, unsigned char *buf,
2893 unsigned char bufsize)
2894{
2895 int rc = IO_OK;
2896 struct CommandList *c;
2897 struct ErrorInfo *ei;
2898
2899 c = cmd_alloc(h);
2900
2901 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2902 page, scsi3addr, TYPE_CMD)) {
2903 rc = -1;
2904 goto out;
2905 }
2906 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2907 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
2908 if (rc)
2909 goto out;
2910 ei = c->err_info;
2911 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2912 hpsa_scsi_interpret_error(h, c);
2913 rc = -1;
2914 }
2915out:
2916 cmd_free(h, c);
2917 return rc;
2918}
2919
2920static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2921 u8 reset_type, int reply_queue)
2922{
2923 int rc = IO_OK;
2924 struct CommandList *c;
2925 struct ErrorInfo *ei;
2926
2927 c = cmd_alloc(h);
2928
2929
2930
2931 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2932 scsi3addr, TYPE_MSG);
2933 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
2934 if (rc) {
2935 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2936 goto out;
2937 }
2938
2939
2940 ei = c->err_info;
2941 if (ei->CommandStatus != 0) {
2942 hpsa_scsi_interpret_error(h, c);
2943 rc = -1;
2944 }
2945out:
2946 cmd_free(h, c);
2947 return rc;
2948}
2949
2950static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2951 struct hpsa_scsi_dev_t *dev,
2952 unsigned char *scsi3addr)
2953{
2954 int i;
2955 bool match = false;
2956 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2957 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2958
2959 if (hpsa_is_cmd_idle(c))
2960 return false;
2961
2962 switch (c->cmd_type) {
2963 case CMD_SCSI:
2964 case CMD_IOCTL_PEND:
2965 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2966 sizeof(c->Header.LUN.LunAddrBytes));
2967 break;
2968
2969 case CMD_IOACCEL1:
2970 case CMD_IOACCEL2:
2971 if (c->phys_disk == dev) {
2972
2973 match = true;
2974 } else {
2975
2976
2977
2978
2979 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2980
2981
2982
2983
2984 match = dev->phys_disk[i] == c->phys_disk;
2985 }
2986 }
2987 break;
2988
2989 case IOACCEL2_TMF:
2990 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2991 match = dev->phys_disk[i]->ioaccel_handle ==
2992 le32_to_cpu(ac->it_nexus);
2993 }
2994 break;
2995
2996 case 0:
2997 match = false;
2998 break;
2999
3000 default:
3001 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3002 c->cmd_type);
3003 BUG();
3004 }
3005
3006 return match;
3007}
3008
3009static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3010 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3011{
3012 int i;
3013 int rc = 0;
3014
3015
3016 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3017 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3018 return -EINTR;
3019 }
3020
3021 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3022
3023 for (i = 0; i < h->nr_cmds; i++) {
3024 struct CommandList *c = h->cmd_pool + i;
3025 int refcount = atomic_inc_return(&c->refcount);
3026
3027 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3028 unsigned long flags;
3029
3030
3031
3032
3033
3034
3035
3036 c->reset_pending = dev;
3037 spin_lock_irqsave(&h->lock, flags);
3038 if (!hpsa_is_cmd_idle(c))
3039 atomic_inc(&dev->reset_cmds_out);
3040 else
3041 c->reset_pending = NULL;
3042 spin_unlock_irqrestore(&h->lock, flags);
3043 }
3044
3045 cmd_free(h, c);
3046 }
3047
3048 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3049 if (!rc)
3050 wait_event(h->event_sync_wait_queue,
3051 atomic_read(&dev->reset_cmds_out) == 0 ||
3052 lockup_detected(h));
3053
3054 if (unlikely(lockup_detected(h))) {
3055 dev_warn(&h->pdev->dev,
3056 "Controller lockup detected during reset wait\n");
3057 rc = -ENODEV;
3058 }
3059
3060 if (unlikely(rc))
3061 atomic_set(&dev->reset_cmds_out, 0);
3062
3063 mutex_unlock(&h->reset_mutex);
3064 return rc;
3065}
3066
3067static void hpsa_get_raid_level(struct ctlr_info *h,
3068 unsigned char *scsi3addr, unsigned char *raid_level)
3069{
3070 int rc;
3071 unsigned char *buf;
3072
3073 *raid_level = RAID_UNKNOWN;
3074 buf = kzalloc(64, GFP_KERNEL);
3075 if (!buf)
3076 return;
3077 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
3078 if (rc == 0)
3079 *raid_level = buf[8];
3080 if (*raid_level > RAID_UNKNOWN)
3081 *raid_level = RAID_UNKNOWN;
3082 kfree(buf);
3083 return;
3084}
3085
3086#define HPSA_MAP_DEBUG
3087#ifdef HPSA_MAP_DEBUG
3088static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3089 struct raid_map_data *map_buff)
3090{
3091 struct raid_map_disk_data *dd = &map_buff->data[0];
3092 int map, row, col;
3093 u16 map_cnt, row_cnt, disks_per_row;
3094
3095 if (rc != 0)
3096 return;
3097
3098
3099 if (h->raid_offload_debug < 2)
3100 return;
3101
3102 dev_info(&h->pdev->dev, "structure_size = %u\n",
3103 le32_to_cpu(map_buff->structure_size));
3104 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3105 le32_to_cpu(map_buff->volume_blk_size));
3106 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3107 le64_to_cpu(map_buff->volume_blk_cnt));
3108 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3109 map_buff->phys_blk_shift);
3110 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3111 map_buff->parity_rotation_shift);
3112 dev_info(&h->pdev->dev, "strip_size = %u\n",
3113 le16_to_cpu(map_buff->strip_size));
3114 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3115 le64_to_cpu(map_buff->disk_starting_blk));
3116 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3117 le64_to_cpu(map_buff->disk_blk_cnt));
3118 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3119 le16_to_cpu(map_buff->data_disks_per_row));
3120 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3121 le16_to_cpu(map_buff->metadata_disks_per_row));
3122 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3123 le16_to_cpu(map_buff->row_cnt));
3124 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3125 le16_to_cpu(map_buff->layout_map_count));
3126 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3127 le16_to_cpu(map_buff->flags));
3128 dev_info(&h->pdev->dev, "encryption = %s\n",
3129 le16_to_cpu(map_buff->flags) &
3130 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3131 dev_info(&h->pdev->dev, "dekindex = %u\n",
3132 le16_to_cpu(map_buff->dekindex));
3133 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3134 for (map = 0; map < map_cnt; map++) {
3135 dev_info(&h->pdev->dev, "Map%u:\n", map);
3136 row_cnt = le16_to_cpu(map_buff->row_cnt);
3137 for (row = 0; row < row_cnt; row++) {
3138 dev_info(&h->pdev->dev, " Row%u:\n", row);
3139 disks_per_row =
3140 le16_to_cpu(map_buff->data_disks_per_row);
3141 for (col = 0; col < disks_per_row; col++, dd++)
3142 dev_info(&h->pdev->dev,
3143 " D%02u: h=0x%04x xor=%u,%u\n",
3144 col, dd->ioaccel_handle,
3145 dd->xor_mult[0], dd->xor_mult[1]);
3146 disks_per_row =
3147 le16_to_cpu(map_buff->metadata_disks_per_row);
3148 for (col = 0; col < disks_per_row; col++, dd++)
3149 dev_info(&h->pdev->dev,
3150 " M%02u: h=0x%04x xor=%u,%u\n",
3151 col, dd->ioaccel_handle,
3152 dd->xor_mult[0], dd->xor_mult[1]);
3153 }
3154 }
3155}
3156#else
3157static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3158 __attribute__((unused)) int rc,
3159 __attribute__((unused)) struct raid_map_data *map_buff)
3160{
3161}
3162#endif
3163
3164static int hpsa_get_raid_map(struct ctlr_info *h,
3165 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3166{
3167 int rc = 0;
3168 struct CommandList *c;
3169 struct ErrorInfo *ei;
3170
3171 c = cmd_alloc(h);
3172
3173 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3174 sizeof(this_device->raid_map), 0,
3175 scsi3addr, TYPE_CMD)) {
3176 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3177 cmd_free(h, c);
3178 return -1;
3179 }
3180 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3181 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3182 if (rc)
3183 goto out;
3184 ei = c->err_info;
3185 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3186 hpsa_scsi_interpret_error(h, c);
3187 rc = -1;
3188 goto out;
3189 }
3190 cmd_free(h, c);
3191
3192
3193 if (le32_to_cpu(this_device->raid_map.structure_size) >
3194 sizeof(this_device->raid_map)) {
3195 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3196 rc = -1;
3197 }
3198 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3199 return rc;
3200out:
3201 cmd_free(h, c);
3202 return rc;
3203}
3204
3205static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3206 unsigned char scsi3addr[], u16 bmic_device_index,
3207 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3208{
3209 int rc = IO_OK;
3210 struct CommandList *c;
3211 struct ErrorInfo *ei;
3212
3213 c = cmd_alloc(h);
3214
3215 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3216 0, RAID_CTLR_LUNID, TYPE_CMD);
3217 if (rc)
3218 goto out;
3219
3220 c->Request.CDB[2] = bmic_device_index & 0xff;
3221 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3222
3223 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3224 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3225 if (rc)
3226 goto out;
3227 ei = c->err_info;
3228 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3229 hpsa_scsi_interpret_error(h, c);
3230 rc = -1;
3231 }
3232out:
3233 cmd_free(h, c);
3234 return rc;
3235}
3236
3237static int hpsa_bmic_id_controller(struct ctlr_info *h,
3238 struct bmic_identify_controller *buf, size_t bufsize)
3239{
3240 int rc = IO_OK;
3241 struct CommandList *c;
3242 struct ErrorInfo *ei;
3243
3244 c = cmd_alloc(h);
3245
3246 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3247 0, RAID_CTLR_LUNID, TYPE_CMD);
3248 if (rc)
3249 goto out;
3250
3251 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3252 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3253 if (rc)
3254 goto out;
3255 ei = c->err_info;
3256 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3257 hpsa_scsi_interpret_error(h, c);
3258 rc = -1;
3259 }
3260out:
3261 cmd_free(h, c);
3262 return rc;
3263}
3264
3265static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3266 unsigned char scsi3addr[], u16 bmic_device_index,
3267 struct bmic_identify_physical_device *buf, size_t bufsize)
3268{
3269 int rc = IO_OK;
3270 struct CommandList *c;
3271 struct ErrorInfo *ei;
3272
3273 c = cmd_alloc(h);
3274 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3275 0, RAID_CTLR_LUNID, TYPE_CMD);
3276 if (rc)
3277 goto out;
3278
3279 c->Request.CDB[2] = bmic_device_index & 0xff;
3280 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3281
3282 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3283 DEFAULT_TIMEOUT);
3284 ei = c->err_info;
3285 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3286 hpsa_scsi_interpret_error(h, c);
3287 rc = -1;
3288 }
3289out:
3290 cmd_free(h, c);
3291
3292 return rc;
3293}
3294
3295
3296
3297
3298
3299
3300
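/*
 * Obtain enclosure information (box number and physical connector)
 * for an enclosure device by issuing BMIC sense-storage-box-params,
 * keyed by the drive's BMIC device index.
 */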
3301static void hpsa_get_enclosure_info(struct ctlr_info *h,
3302 unsigned char *scsi3addr,
3303 struct ReportExtendedLUNdata *rlep, int rle_index,
3304 struct hpsa_scsi_dev_t *encl_dev)
3305{
3306 int rc = -1;
3307 struct CommandList *c = NULL;
3308 struct ErrorInfo *ei = NULL;
3309 struct bmic_sense_storage_box_params *bssbp = NULL;
3310 struct bmic_identify_physical_device *id_phys = NULL;
3311 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3312 u16 bmic_device_index = 0;
3313
3314 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3315
3316 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3317 rc = IO_OK;
3318 goto out;
3319 }
3320
3321 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3322 if (!bssbp)
3323 goto out;
3324
3325 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3326 if (!id_phys)
3327 goto out;
3328
3329 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3330 id_phys, sizeof(*id_phys));
3331 if (rc) {
3332 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3333 __func__, encl_dev->external, bmic_device_index);
3334 goto out;
3335 }
3336
3337 c = cmd_alloc(h);
3338
3339 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3340 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3341
3342 if (rc)
3343 goto out;
3344
3345 if (id_phys->phys_connector[1] == 'E')
3346 c->Request.CDB[5] = id_phys->box_index;
3347 else
3348 c->Request.CDB[5] = 0;
3349
3350 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3351 DEFAULT_TIMEOUT);
3352 if (rc)
3353 goto out;
3354
3355 ei = c->err_info;
3356 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3357 rc = -1;
3358 goto out;
3359 }
3360
3361 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3362 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3363 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3364
3365 rc = IO_OK;
3366out:
3367 kfree(bssbp);
3368 kfree(id_phys);
3369
3370 if (c)
3371 cmd_free(h, c);
3372
3373 if (rc != IO_OK)
3374 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3375 "Error, could not get enclosure information\n");
3376}
3377
3378static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3379 unsigned char *scsi3addr)
3380{
3381 struct ReportExtendedLUNdata *physdev;
3382 u32 nphysicals;
3383 u64 sa = 0;
3384 int i;
3385
3386 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3387 if (!physdev)
3388 return 0;
3389
3390 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3391 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3392 kfree(physdev);
3393 return 0;
3394 }
3395 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3396
3397 for (i = 0; i < nphysicals; i++)
3398 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3399 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3400 break;
3401 }
3402
3403 kfree(physdev);
3404
3405 return sa;
3406}
3407
3408static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3409 struct hpsa_scsi_dev_t *dev)
3410{
3411 int rc;
3412 u64 sa = 0;
3413
3414 if (is_hba_lunid(scsi3addr)) {
3415 struct bmic_sense_subsystem_info *ssi;
3416
3417 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3418 if (ssi == NULL) {
3419 dev_warn(&h->pdev->dev,
3420 "%s: out of memory\n", __func__);
3421 return;
3422 }
3423
3424 rc = hpsa_bmic_sense_subsystem_information(h,
3425 scsi3addr, 0, ssi, sizeof(*ssi));
3426 if (rc == 0) {
3427 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3428 h->sas_address = sa;
3429 }
3430
3431 kfree(ssi);
3432 } else
3433 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3434
3435 dev->sas_address = sa;
3436}
3437
3438
3439static int hpsa_vpd_page_supported(struct ctlr_info *h,
3440 unsigned char scsi3addr[], u8 page)
3441{
3442 int rc;
3443 int i;
3444 int pages;
3445 unsigned char *buf, bufsize;
3446
3447 buf = kzalloc(256, GFP_KERNEL);
3448 if (!buf)
3449 return 0;
3450
3451
3452 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3453 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3454 buf, HPSA_VPD_HEADER_SZ);
3455 if (rc != 0)
3456 goto exit_unsupported;
3457 pages = buf[3];
3458 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3459 bufsize = pages + HPSA_VPD_HEADER_SZ;
3460 else
3461 bufsize = 255;
3462
3463
3464 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3465 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3466 buf, bufsize);
3467 if (rc != 0)
3468 goto exit_unsupported;
3469
3470 pages = buf[3];
3471 for (i = 1; i <= pages; i++)
3472 if (buf[3 + i] == page)
3473 goto exit_supported;
3474exit_unsupported:
3475 kfree(buf);
3476 return 0;
3477exit_supported:
3478 kfree(buf);
3479 return 1;
3480}
3481
3482static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3483 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3484{
3485 int rc;
3486 unsigned char *buf;
3487 u8 ioaccel_status;
3488
3489 this_device->offload_config = 0;
3490 this_device->offload_enabled = 0;
3491 this_device->offload_to_be_enabled = 0;
3492
3493 buf = kzalloc(64, GFP_KERNEL);
3494 if (!buf)
3495 return;
3496 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3497 goto out;
3498 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3499 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3500 if (rc != 0)
3501 goto out;
3502
3503#define IOACCEL_STATUS_BYTE 4
3504#define OFFLOAD_CONFIGURED_BIT 0x01
3505#define OFFLOAD_ENABLED_BIT 0x02
3506 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3507 this_device->offload_config =
3508 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3509 if (this_device->offload_config) {
3510 this_device->offload_enabled =
3511 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3512 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3513 this_device->offload_enabled = 0;
3514 }
3515 this_device->offload_to_be_enabled = this_device->offload_enabled;
3516out:
3517 kfree(buf);
3518 return;
3519}
3520
3521
3522static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3523 unsigned char *device_id, int index, int buflen)
3524{
3525 int rc;
3526 unsigned char *buf;
3527
3528 if (buflen > 16)
3529 buflen = 16;
3530 buf = kzalloc(64, GFP_KERNEL);
3531 if (!buf)
3532 return -ENOMEM;
3533 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3534 if (rc == 0)
3535 memcpy(device_id, &buf[index], buflen);
3536
3537 kfree(buf);
3538
3539 return rc != 0;
3540}
3541
3542static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3543 void *buf, int bufsize,
3544 int extended_response)
3545{
3546 int rc = IO_OK;
3547 struct CommandList *c;
3548 unsigned char scsi3addr[8];
3549 struct ErrorInfo *ei;
3550
3551 c = cmd_alloc(h);
3552
3553
3554 memset(scsi3addr, 0, sizeof(scsi3addr));
3555 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3556 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3557 rc = -1;
3558 goto out;
3559 }
3560 if (extended_response)
3561 c->Request.CDB[1] = extended_response;
3562 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3563 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3564 if (rc)
3565 goto out;
3566 ei = c->err_info;
3567 if (ei->CommandStatus != 0 &&
3568 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3569 hpsa_scsi_interpret_error(h, c);
3570 rc = -1;
3571 } else {
3572 struct ReportLUNdata *rld = buf;
3573
3574 if (rld->extended_response_flag != extended_response) {
3575 dev_err(&h->pdev->dev,
3576 "report luns requested format %u, got %u\n",
3577 extended_response,
3578 rld->extended_response_flag);
3579 rc = -1;
3580 }
3581 }
3582out:
3583 cmd_free(h, c);
3584 return rc;
3585}
3586
3587static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3588 struct ReportExtendedLUNdata *buf, int bufsize)
3589{
3590 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3591 HPSA_REPORT_PHYS_EXTENDED);
3592}
3593
3594static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3595 struct ReportLUNdata *buf, int bufsize)
3596{
3597 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3598}
3599
3600static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3601 int bus, int target, int lun)
3602{
3603 device->bus = bus;
3604 device->target = target;
3605 device->lun = lun;
3606}
3607
3608
3609static int hpsa_get_volume_status(struct ctlr_info *h,
3610 unsigned char scsi3addr[])
3611{
3612 int rc;
3613 int status;
3614 int size;
3615 unsigned char *buf;
3616
3617 buf = kzalloc(64, GFP_KERNEL);
3618 if (!buf)
3619 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3620
3621
3622 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3623 goto exit_failed;
3624
3625
3626 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3627 buf, HPSA_VPD_HEADER_SZ);
3628 if (rc != 0)
3629 goto exit_failed;
3630 size = buf[3];
3631
3632
3633 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3634 buf, size + HPSA_VPD_HEADER_SZ);
3635 if (rc != 0)
3636 goto exit_failed;
3637 status = buf[4];
3638
3639 kfree(buf);
3640 return status;
3641exit_failed:
3642 kfree(buf);
3643 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3644}
3645
3646
3647
3648
3649
3650
3651
3652
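/*
 * Determine whether a volume is offline: send TEST UNIT READY and, if
 * it reports NOT READY, read the VPD logical-volume status page to
 * find out why.  Returns 0 if the volume is usable, otherwise an
 * HPSA_LV_* status code.
 */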
3653static int hpsa_volume_offline(struct ctlr_info *h,
3654 unsigned char scsi3addr[])
3655{
3656 struct CommandList *c;
3657 unsigned char *sense;
3658 u8 sense_key, asc, ascq;
3659 int sense_len;
3660 int rc, ldstat = 0;
3661 u16 cmd_status;
3662 u8 scsi_status;
3663#define ASC_LUN_NOT_READY 0x04
3664#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3665#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3666
3667 c = cmd_alloc(h);
3668
3669 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3670 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3671 DEFAULT_TIMEOUT);
3672 if (rc) {
3673 cmd_free(h, c);
3674 return 0;
3675 }
3676 sense = c->err_info->SenseInfo;
3677 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3678 sense_len = sizeof(c->err_info->SenseInfo);
3679 else
3680 sense_len = c->err_info->SenseLen;
3681 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3682 cmd_status = c->err_info->CommandStatus;
3683 scsi_status = c->err_info->ScsiStatus;
3684 cmd_free(h, c);
3685
3686 if (cmd_status != CMD_TARGET_STATUS ||
3687 scsi_status != SAM_STAT_CHECK_CONDITION ||
3688 sense_key != NOT_READY ||
3689 asc != ASC_LUN_NOT_READY) {
3690 return 0;
3691 }
3692
3693
3694 ldstat = hpsa_get_volume_status(h, scsi3addr);
3695
3696
3697 switch (ldstat) {
3698 case HPSA_LV_UNDERGOING_ERASE:
3699 case HPSA_LV_NOT_AVAILABLE:
3700 case HPSA_LV_UNDERGOING_RPI:
3701 case HPSA_LV_PENDING_RPI:
3702 case HPSA_LV_ENCRYPTED_NO_KEY:
3703 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3704 case HPSA_LV_UNDERGOING_ENCRYPTION:
3705 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3706 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3707 return ldstat;
3708 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3709
3710
3711
3712 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3713 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3714 return ldstat;
3715 break;
3716 default:
3717 break;
3718 }
3719 return 0;
3720}
3721
3722
3723
3724
3725
3726
3727
3728
3729
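/*
 * Probe whether a logical device supports aborts by issuing an abort
 * for a bogus tag and examining how the request is rejected.
 * Physical (non-logical-addressing) devices are assumed to support
 * aborts.
 */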
3730static int hpsa_device_supports_aborts(struct ctlr_info *h,
3731 unsigned char *scsi3addr)
3732{
3733 struct CommandList *c;
3734 struct ErrorInfo *ei;
3735 int rc = 0;
3736
3737 u64 tag = (u64) -1;
3738
3739
3740 if (!is_logical_dev_addr_mode(scsi3addr))
3741 return 1;
3742
3743 c = cmd_alloc(h);
3744
3745 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3746 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3747 DEFAULT_TIMEOUT);
3748
3749 ei = c->err_info;
3750 switch (ei->CommandStatus) {
3751 case CMD_INVALID:
3752 rc = 0;
3753 break;
3754 case CMD_UNABORTABLE:
3755 case CMD_ABORT_FAILED:
3756 rc = 1;
3757 break;
3758 case CMD_TMF_STATUS:
3759 rc = hpsa_evaluate_tmf_status(h, c);
3760 break;
3761 default:
3762 rc = 0;
3763 break;
3764 }
3765 cmd_free(h, c);
3766 return rc;
3767}
3768
3769static int hpsa_update_device_info(struct ctlr_info *h,
3770 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3771 unsigned char *is_OBDR_device)
3772{
3773
3774#define OBDR_SIG_OFFSET 43
3775#define OBDR_TAPE_SIG "$DR-10"
3776#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3777#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3778
3779 unsigned char *inq_buff;
3780 unsigned char *obdr_sig;
3781 int rc = 0;
3782
3783 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3784 if (!inq_buff) {
3785 rc = -ENOMEM;
3786 goto bail_out;
3787 }
3788
3789
3790 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3791 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3792
3793 dev_err(&h->pdev->dev,
3794 "hpsa_update_device_info: inquiry failed\n");
3795 rc = -EIO;
3796 goto bail_out;
3797 }
3798
3799 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3800 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3801
3802 this_device->devtype = (inq_buff[0] & 0x1f);
3803 memcpy(this_device->scsi3addr, scsi3addr, 8);
3804 memcpy(this_device->vendor, &inq_buff[8],
3805 sizeof(this_device->vendor));
3806 memcpy(this_device->model, &inq_buff[16],
3807 sizeof(this_device->model));
3808 memset(this_device->device_id, 0,
3809 sizeof(this_device->device_id));
3810 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3811 sizeof(this_device->device_id));
3812
3813 if ((this_device->devtype == TYPE_DISK ||
3814 this_device->devtype == TYPE_ZBC) &&
3815 is_logical_dev_addr_mode(scsi3addr)) {
3816 int volume_offline;
3817
3818 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3819 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3820 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3821 volume_offline = hpsa_volume_offline(h, scsi3addr);
3822 if (volume_offline < 0 || volume_offline > 0xff)
3823 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3824 this_device->volume_offline = volume_offline & 0xff;
3825 } else {
3826 this_device->raid_level = RAID_UNKNOWN;
3827 this_device->offload_config = 0;
3828 this_device->offload_enabled = 0;
3829 this_device->offload_to_be_enabled = 0;
3830 this_device->hba_ioaccel_enabled = 0;
3831 this_device->volume_offline = 0;
3832 this_device->queue_depth = h->nr_cmds;
3833 }
3834
3835 if (is_OBDR_device) {
3836
3837
3838
3839 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3840 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3841 strncmp(obdr_sig, OBDR_TAPE_SIG,
3842 OBDR_SIG_LEN) == 0);
3843 }
3844 kfree(inq_buff);
3845 return 0;
3846
3847bail_out:
3848 kfree(inq_buff);
3849 return rc;
3850}
3851
3852static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3853 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3854{
3855 unsigned long flags;
3856 int rc, entry;
3857
3858
3859
3860
3861
3862 spin_lock_irqsave(&h->devlock, flags);
3863 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3864 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3865 entry >= 0 && entry < h->ndevices) {
3866 dev->supports_aborts = h->dev[entry]->supports_aborts;
3867 spin_unlock_irqrestore(&h->devlock, flags);
3868 } else {
3869 spin_unlock_irqrestore(&h->devlock, flags);
3870 dev->supports_aborts =
3871 hpsa_device_supports_aborts(h, scsi3addr);
3872 if (dev->supports_aborts < 0)
3873 dev->supports_aborts = 0;
3874 }
3875}
3876
3877
3878
3879
3880
3881
3882
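/*
 * Assign bus/target/lun for a discovered device from its LUN address
 * bytes: the controller and physical devices land on the HBA or
 * physical-device bus, logical volumes on the local or external RAID
 * volume bus.
 */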
3883static void figure_bus_target_lun(struct ctlr_info *h,
3884 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3885{
3886 u32 lunid = get_unaligned_le32(lunaddrbytes);
3887
3888 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3889
3890 if (is_hba_lunid(lunaddrbytes))
3891 hpsa_set_bus_target_lun(device,
3892 HPSA_HBA_BUS, 0, lunid & 0x3fff);
3893 else
3894
3895 hpsa_set_bus_target_lun(device,
3896 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3897 return;
3898 }
3899
3900 if (device->external) {
3901 hpsa_set_bus_target_lun(device,
3902 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3903 lunid & 0x00ff);
3904 return;
3905 }
3906 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3907 0, lunid & 0x3fff);
3908}
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
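/*
 * Map an outstanding ioaccel2 command back to the physical disk it
 * was sent to by matching the command's SCSI nexus against the
 * ioaccel handles in h->dev[].  Returns 1 and fills in scsi3addr on
 * a match, 0 otherwise.
 */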
3919static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3920 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3921{
3922 struct io_accel2_cmd *c2 =
3923 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3924 unsigned long flags;
3925 int i;
3926
3927 spin_lock_irqsave(&h->devlock, flags);
3928 for (i = 0; i < h->ndevices; i++)
3929 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3930 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3931 sizeof(h->dev[i]->scsi3addr));
3932 spin_unlock_irqrestore(&h->devlock, flags);
3933 return 1;
3934 }
3935 spin_unlock_irqrestore(&h->devlock, flags);
3936 return 0;
3937}
3938
3939static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
3940 int i, int nphysicals, int nlocal_logicals)
3941{
3942
3943
3944
3945 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3946
3947 if (i == raid_ctlr_position)
3948 return 0;
3949
3950 if (i < logicals_start)
3951 return 0;
3952
3953
3954 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
3955 return 0;
3956
3957 return 1;
3958}
3959
3960
3961
3962
3963
3964
3965
3966static int hpsa_gather_lun_info(struct ctlr_info *h,
3967 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3968 struct ReportLUNdata *logdev, u32 *nlogicals)
3969{
3970 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3971 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3972 return -1;
3973 }
3974 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3975 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3976 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3977 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3978 *nphysicals = HPSA_MAX_PHYS_LUN;
3979 }
3980 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3981 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3982 return -1;
3983 }
3984 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3985
3986 if (*nlogicals > HPSA_MAX_LUN) {
3987 dev_warn(&h->pdev->dev,
3988 "maximum logical LUNs (%d) exceeded. "
3989 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3990 *nlogicals - HPSA_MAX_LUN);
3991 *nlogicals = HPSA_MAX_LUN;
3992 }
3993 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3994 dev_warn(&h->pdev->dev,
3995 "maximum logical + physical LUNs (%d) exceeded. "
3996 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3997 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3998 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3999 }
4000 return 0;
4001}
4002
4003static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4004 int i, int nphysicals, int nlogicals,
4005 struct ReportExtendedLUNdata *physdev_list,
4006 struct ReportLUNdata *logdev_list)
4007{
4008
4009
4010
4011
4012
4013 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4014 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4015
4016 if (i == raid_ctlr_position)
4017 return RAID_CTLR_LUNID;
4018
4019 if (i < logicals_start)
4020 return &physdev_list->LUN[i -
4021 (raid_ctlr_position == 0)].lunid[0];
4022
4023 if (i < last_device)
4024 return &logdev_list->LUN[i - nphysicals -
4025 (raid_ctlr_position == 0)][0];
4026 BUG();
4027 return NULL;
4028}
4029
4030
4031static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4032 struct hpsa_scsi_dev_t *dev,
4033 struct ReportExtendedLUNdata *rlep, int rle_index,
4034 struct bmic_identify_physical_device *id_phys)
4035{
4036 int rc;
4037 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4038
4039 dev->ioaccel_handle = rle->ioaccel_handle;
4040 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4041 dev->hba_ioaccel_enabled = 1;
4042 memset(id_phys, 0, sizeof(*id_phys));
4043 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4044 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4045 sizeof(*id_phys));
4046 if (!rc)
4047
4048#define DRIVE_CMDS_RESERVED_FOR_FW 2
4049#define DRIVE_QUEUE_DEPTH 7
4050 dev->queue_depth =
4051 le16_to_cpu(id_phys->current_queue_depth_limit) -
4052 DRIVE_CMDS_RESERVED_FOR_FW;
4053 else
4054 dev->queue_depth = DRIVE_QUEUE_DEPTH;
4055}
4056
4057static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4058 struct ReportExtendedLUNdata *rlep, int rle_index,
4059 struct bmic_identify_physical_device *id_phys)
4060{
4061 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4062
4063 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4064 this_device->hba_ioaccel_enabled = 1;
4065
4066 memcpy(&this_device->active_path_index,
4067 &id_phys->active_path_number,
4068 sizeof(this_device->active_path_index));
4069 memcpy(&this_device->path_map,
4070 &id_phys->redundant_path_present_map,
4071 sizeof(this_device->path_map));
4072 memcpy(&this_device->box,
4073 &id_phys->alternate_paths_phys_box_on_port,
4074 sizeof(this_device->box));
4075 memcpy(&this_device->phys_connector,
4076 &id_phys->alternate_paths_phys_connector,
4077 sizeof(this_device->phys_connector));
4078 memcpy(&this_device->bay,
4079 &id_phys->phys_bay_in_box,
4080 sizeof(this_device->bay));
4081}
4082
4083
4084static int hpsa_set_local_logical_count(struct ctlr_info *h,
4085 struct bmic_identify_controller *id_ctlr,
4086 u32 *nlocals)
4087{
4088 int rc;
4089
4090 if (!id_ctlr) {
4091 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4092 __func__);
4093 return -ENOMEM;
4094 }
4095 memset(id_ctlr, 0, sizeof(*id_ctlr));
4096 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4097 if (!rc) {
4098 if (id_ctlr->configured_logical_drive_count < 256)
4099 *nlocals = id_ctlr->configured_logical_drive_count;
4100 else
4101 *nlocals = le16_to_cpu(
4102 id_ctlr->extended_logical_unit_count);
4103 } else
4104 *nlocals = -1;
4105 return rc;
4106}
4107
4108
4109static void hpsa_update_scsi_devices(struct ctlr_info *h)
4110{
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
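/*
 * Rebuild the driver's view of attached devices: fetch the physical
 * and logical LUN lists from the controller, interrogate each
 * reported device, and build the list used to refresh the driver's
 * device table.
 */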
4121 struct ReportExtendedLUNdata *physdev_list = NULL;
4122 struct ReportLUNdata *logdev_list = NULL;
4123 struct bmic_identify_physical_device *id_phys = NULL;
4124 struct bmic_identify_controller *id_ctlr = NULL;
4125 u32 nphysicals = 0;
4126 u32 nlogicals = 0;
4127 u32 nlocal_logicals = 0;
4128 u32 ndev_allocated = 0;
4129 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4130 int ncurrent = 0;
4131 int i, n_ext_target_devs, ndevs_to_allocate;
4132 int raid_ctlr_position;
4133 bool physical_device;
4134 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4135
4136 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4137 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4138 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4139 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4140 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4141 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4142
4143 if (!currentsd || !physdev_list || !logdev_list ||
4144 !tmpdevice || !id_phys || !id_ctlr) {
4145 dev_err(&h->pdev->dev, "out of memory\n");
4146 goto out;
4147 }
4148 memset(lunzerobits, 0, sizeof(lunzerobits));
4149
4150 h->drv_req_rescan = 0;
4151
4152 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4153 logdev_list, &nlogicals)) {
4154 h->drv_req_rescan = 1;
4155 goto out;
4156 }
4157
4158
4159 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4160 dev_warn(&h->pdev->dev,
4161 "%s: Can't determine number of local logical devices.\n",
4162 __func__);
4163 }
4164
4165
4166
4167
4168
4169 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4170
4171
4172 for (i = 0; i < ndevs_to_allocate; i++) {
4173 if (i >= HPSA_MAX_DEVICES) {
4174 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4175 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4176 ndevs_to_allocate - HPSA_MAX_DEVICES);
4177 break;
4178 }
4179
4180 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4181 if (!currentsd[i]) {
4182 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4183 __FILE__, __LINE__);
4184 h->drv_req_rescan = 1;
4185 goto out;
4186 }
4187 ndev_allocated++;
4188 }
4189
4190 if (is_scsi_rev_5(h))
4191 raid_ctlr_position = 0;
4192 else
4193 raid_ctlr_position = nphysicals + nlogicals;
4194
4195
4196 n_ext_target_devs = 0;
4197 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4198 u8 *lunaddrbytes, is_OBDR = 0;
4199 int rc = 0;
4200 int phys_dev_index = i - (raid_ctlr_position == 0);
4201
4202 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4203
4204
4205 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4206 i, nphysicals, nlogicals, physdev_list, logdev_list);
4207
4208
4209 if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
4210 (physdev_list->LUN[phys_dev_index].device_type != 0x06) &&
4211 (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
4212 continue;
4213
4214
4215 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4216 &is_OBDR);
4217 if (rc == -ENOMEM) {
4218 dev_warn(&h->pdev->dev,
4219 "Out of memory, rescan deferred.\n");
4220 h->drv_req_rescan = 1;
4221 goto out;
4222 }
4223 if (rc) {
4224 dev_warn(&h->pdev->dev,
4225 "Inquiry failed, skipping device.\n");
4226 continue;
4227 }
4228
4229
4230 tmpdevice->external =
4231 figure_external_status(h, raid_ctlr_position, i,
4232 nphysicals, nlocal_logicals);
4233
4234 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4235 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4236 this_device = currentsd[ncurrent];
4237
4238
4239
4240
4241 if (!h->discovery_polling) {
4242 if (tmpdevice->external) {
4243 h->discovery_polling = 1;
4244 dev_info(&h->pdev->dev,
4245 "External target, activate discovery polling.\n");
4246 }
4247 }
4248
4249
4250 *this_device = *tmpdevice;
4251 this_device->physical_device = physical_device;
4252
4253
4254
4255
4256
4257 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4258 this_device->expose_device = 0;
4259 else
4260 this_device->expose_device = 1;
4261
4262
4263
4264
4265
4266 if (this_device->physical_device && this_device->expose_device)
4267 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4268
4269 switch (this_device->devtype) {
4270 case TYPE_ROM:
4271
4272
4273
4274
4275
4276
4277
4278 if (is_OBDR)
4279 ncurrent++;
4280 break;
4281 case TYPE_DISK:
4282 case TYPE_ZBC:
4283 if (this_device->physical_device) {
4284
4285
4286 this_device->offload_enabled = 0;
4287 hpsa_get_ioaccel_drive_info(h, this_device,
4288 physdev_list, phys_dev_index, id_phys);
4289 hpsa_get_path_info(this_device,
4290 physdev_list, phys_dev_index, id_phys);
4291 }
4292 ncurrent++;
4293 break;
4294 case TYPE_TAPE:
4295 case TYPE_MEDIUM_CHANGER:
4296 ncurrent++;
4297 break;
4298 case TYPE_ENCLOSURE:
4299 if (!this_device->external)
4300 hpsa_get_enclosure_info(h, lunaddrbytes,
4301 physdev_list, phys_dev_index,
4302 this_device);
4303 ncurrent++;
4304 break;
4305 case TYPE_RAID:
4306
4307
4308
4309
4310
4311 if (!is_hba_lunid(lunaddrbytes))
4312 break;
4313 ncurrent++;
4314 break;
4315 default:
4316 break;
4317 }
4318 if (ncurrent >= HPSA_MAX_DEVICES)
4319 break;
4320 }
4321
4322 if (h->sas_host == NULL) {
4323 int rc = 0;
4324
4325 rc = hpsa_add_sas_host(h);
4326 if (rc) {
4327 dev_warn(&h->pdev->dev,
4328 "Could not add sas host %d\n", rc);
4329 goto out;
4330 }
4331 }
4332
4333 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4334out:
4335 kfree(tmpdevice);
4336 for (i = 0; i < ndev_allocated; i++)
4337 kfree(currentsd[i]);
4338 kfree(currentsd);
4339 kfree(physdev_list);
4340 kfree(logdev_list);
4341 kfree(id_ctlr);
4342 kfree(id_phys);
4343}
4344
4345static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4346 struct scatterlist *sg)
4347{
4348 u64 addr64 = (u64) sg_dma_address(sg);
4349 unsigned int len = sg_dma_len(sg);
4350
4351 desc->Addr = cpu_to_le64(addr64);
4352 desc->Len = cpu_to_le32(len);
4353 desc->Ext = 0;
4354}
4355
4356
4357
4358
4359
4360
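/*
 * Map the scsi command's scatter-gather list for DMA and fill in the
 * command's SG descriptors.  If there are more entries than fit in the
 * command itself, the remainder go into a chained SG block.
 */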
4361static int hpsa_scatter_gather(struct ctlr_info *h,
4362 struct CommandList *cp,
4363 struct scsi_cmnd *cmd)
4364{
4365 struct scatterlist *sg;
4366 int use_sg, i, sg_limit, chained, last_sg;
4367 struct SGDescriptor *curr_sg;
4368
4369 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4370
4371 use_sg = scsi_dma_map(cmd);
4372 if (use_sg < 0)
4373 return use_sg;
4374
4375 if (!use_sg)
4376 goto sglist_finished;
4377
4378
4379
4380
4381
4382
4383
4384
4385 curr_sg = cp->SG;
4386 chained = use_sg > h->max_cmd_sg_entries;
4387 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4388 last_sg = scsi_sg_count(cmd) - 1;
4389 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4390 hpsa_set_sg_descriptor(curr_sg, sg);
4391 curr_sg++;
4392 }
4393
4394 if (chained) {
4395
4396
4397
4398
4399
4400
4401 curr_sg = h->cmd_sg_list[cp->cmdindex];
4402 sg_limit = use_sg - sg_limit;
4403 for_each_sg(sg, sg, sg_limit, i) {
4404 hpsa_set_sg_descriptor(curr_sg, sg);
4405 curr_sg++;
4406 }
4407 }
4408
4409
4410 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4411
4412 if (use_sg + chained > h->maxSG)
4413 h->maxSG = use_sg + chained;
4414
4415 if (chained) {
4416 cp->Header.SGList = h->max_cmd_sg_entries;
4417 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4418 if (hpsa_map_sg_chain_block(h, cp)) {
4419 scsi_dma_unmap(cmd);
4420 return -1;
4421 }
4422 return 0;
4423 }
4424
4425sglist_finished:
4426
4427 cp->Header.SGList = (u8) use_sg;
4428 cp->Header.SGTotal = cpu_to_le16(use_sg);
4429 return 0;
4430}
4431
4432#define IO_ACCEL_INELIGIBLE (1)
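/*
 * Rewrite 6- and 12-byte READ/WRITE CDBs as 10-byte CDBs for the
 * accelerated I/O path; requests whose block count does not fit in a
 * 10-byte CDB are reported as IO_ACCEL_INELIGIBLE.
 */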
4433static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4434{
4435 int is_write = 0;
4436 u32 block;
4437 u32 block_cnt;
4438
4439
4440 switch (cdb[0]) {
4441 case WRITE_6:
4442 case WRITE_12:
4443 is_write = 1;
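		/* fall through */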
4444 case READ_6:
4445 case READ_12:
4446 if (*cdb_len == 6) {
4447 block = get_unaligned_be16(&cdb[2]);
4448 block_cnt = cdb[4];
4449 if (block_cnt == 0)
4450 block_cnt = 256;
4451 } else {
4452 BUG_ON(*cdb_len != 12);
4453 block = get_unaligned_be32(&cdb[2]);
4454 block_cnt = get_unaligned_be32(&cdb[6]);
4455 }
4456 if (block_cnt > 0xffff)
4457 return IO_ACCEL_INELIGIBLE;
4458
4459 cdb[0] = is_write ? WRITE_10 : READ_10;
4460 cdb[1] = 0;
4461 cdb[2] = (u8) (block >> 24);
4462 cdb[3] = (u8) (block >> 16);
4463 cdb[4] = (u8) (block >> 8);
4464 cdb[5] = (u8) (block);
4465 cdb[6] = 0;
4466 cdb[7] = (u8) (block_cnt >> 8);
4467 cdb[8] = (u8) (block_cnt);
4468 cdb[9] = 0;
4469 *cdb_len = 10;
4470 break;
4471 }
4472 return 0;
4473}
4474
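/*
 * Build and submit an io_accel1 command directly to a physical device:
 * map the data buffer, set the transfer direction, and copy the CDB
 * and LUN before starting I/O.
 */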
4475static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4476 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4477 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4478{
4479 struct scsi_cmnd *cmd = c->scsi_cmd;
4480 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4481 unsigned int len;
4482 unsigned int total_len = 0;
4483 struct scatterlist *sg;
4484 u64 addr64;
4485 int use_sg, i;
4486 struct SGDescriptor *curr_sg;
4487 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4488
4489
4490 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4491 atomic_dec(&phys_disk->ioaccel_cmds_out);
4492 return IO_ACCEL_INELIGIBLE;
4493 }
4494
4495 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4496
4497 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4498 atomic_dec(&phys_disk->ioaccel_cmds_out);
4499 return IO_ACCEL_INELIGIBLE;
4500 }
4501
4502 c->cmd_type = CMD_IOACCEL1;
4503
4504
4505 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4506 (c->cmdindex * sizeof(*cp));
4507 BUG_ON(c->busaddr & 0x0000007F);
4508
4509 use_sg = scsi_dma_map(cmd);
4510 if (use_sg < 0) {
4511 atomic_dec(&phys_disk->ioaccel_cmds_out);
4512 return use_sg;
4513 }
4514
4515 if (use_sg) {
4516 curr_sg = cp->SG;
4517 scsi_for_each_sg(cmd, sg, use_sg, i) {
4518 addr64 = (u64) sg_dma_address(sg);
4519 len = sg_dma_len(sg);
4520 total_len += len;
4521 curr_sg->Addr = cpu_to_le64(addr64);
4522 curr_sg->Len = cpu_to_le32(len);
4523 curr_sg->Ext = cpu_to_le32(0);
4524 curr_sg++;
4525 }
4526 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4527
4528 switch (cmd->sc_data_direction) {
4529 case DMA_TO_DEVICE:
4530 control |= IOACCEL1_CONTROL_DATA_OUT;
4531 break;
4532 case DMA_FROM_DEVICE:
4533 control |= IOACCEL1_CONTROL_DATA_IN;
4534 break;
4535 case DMA_NONE:
4536 control |= IOACCEL1_CONTROL_NODATAXFER;
4537 break;
4538 default:
4539 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4540 cmd->sc_data_direction);
4541 BUG();
4542 break;
4543 }
4544 } else {
4545 control |= IOACCEL1_CONTROL_NODATAXFER;
4546 }
4547
4548 c->Header.SGList = use_sg;
4549
4550 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4551 cp->transfer_len = cpu_to_le32(total_len);
4552 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4553 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4554 cp->control = cpu_to_le32(control);
4555 memcpy(cp->CDB, cdb, cdb_len);
4556 memcpy(cp->CISS_LUN, scsi3addr, 8);
4557
4558 enqueue_cmd_and_start_io(h, c);
4559 return 0;
4560}
4561
4562
4563
4564
4565
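/*
 * Queue a command straight to a physical device using its ioaccel
 * handle, bypassing the RAID map.
 */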
4566static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4567 struct CommandList *c)
4568{
4569 struct scsi_cmnd *cmd = c->scsi_cmd;
4570 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4571
4572 c->phys_disk = dev;
4573
4574 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4575 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4576}
4577
4578
4579
4580
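/*
 * If the volume is encrypted, set the data encryption key index and
 * the encrypt flag and derive the tweak value from the command's
 * starting LBA (scaled when the volume block size is not 512 bytes).
 */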
4581static void set_encrypt_ioaccel2(struct ctlr_info *h,
4582 struct CommandList *c, struct io_accel2_cmd *cp)
4583{
4584 struct scsi_cmnd *cmd = c->scsi_cmd;
4585 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4586 struct raid_map_data *map = &dev->raid_map;
4587 u64 first_block;
4588
4589
4590 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4591 return;
4592
4593 cp->dekindex = map->dekindex;
4594
4595
4596 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4597
4598
4599
4600
4601
4602 switch (cmd->cmnd[0]) {
4603
4604 case WRITE_6:
4605 case READ_6:
4606 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4607 break;
4608 case WRITE_10:
4609 case READ_10:
4610
4611 case WRITE_12:
4612 case READ_12:
4613 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4614 break;
4615 case WRITE_16:
4616 case READ_16:
4617 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4618 break;
4619 default:
4620 dev_err(&h->pdev->dev,
4621 "ERROR: %s: size (0x%x) not supported for encryption\n",
4622 __func__, cmd->cmnd[0]);
4623 BUG();
4624 break;
4625 }
4626
4627 if (le32_to_cpu(map->volume_blk_size) != 512)
4628 first_block = first_block *
4629 le32_to_cpu(map->volume_blk_size)/512;
4630
4631 cp->tweak_lower = cpu_to_le32(first_block);
4632 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4633}
4634
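/*
 * Build and submit an io_accel2 command: map the scatter-gather list
 * (chaining it when it exceeds ioaccel_maxsg), set the transfer
 * direction and encryption fields, and start I/O.
 */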
4635static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4636 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4637 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4638{
4639 struct scsi_cmnd *cmd = c->scsi_cmd;
4640 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4641 struct ioaccel2_sg_element *curr_sg;
4642 int use_sg, i;
4643 struct scatterlist *sg;
4644 u64 addr64;
4645 u32 len;
4646 u32 total_len = 0;
4647
4648 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4649
4650 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4651 atomic_dec(&phys_disk->ioaccel_cmds_out);
4652 return IO_ACCEL_INELIGIBLE;
4653 }
4654
4655 c->cmd_type = CMD_IOACCEL2;
4656
4657 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4658 (c->cmdindex * sizeof(*cp));
4659 BUG_ON(c->busaddr & 0x0000007F);
4660
4661 memset(cp, 0, sizeof(*cp));
4662 cp->IU_type = IOACCEL2_IU_TYPE;
4663
4664 use_sg = scsi_dma_map(cmd);
4665 if (use_sg < 0) {
4666 atomic_dec(&phys_disk->ioaccel_cmds_out);
4667 return use_sg;
4668 }
4669
4670 if (use_sg) {
4671 curr_sg = cp->sg;
4672 if (use_sg > h->ioaccel_maxsg) {
4673 addr64 = le64_to_cpu(
4674 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4675 curr_sg->address = cpu_to_le64(addr64);
4676 curr_sg->length = 0;
4677 curr_sg->reserved[0] = 0;
4678 curr_sg->reserved[1] = 0;
4679 curr_sg->reserved[2] = 0;
4680 curr_sg->chain_indicator = 0x80;
4681
4682 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4683 }
4684 scsi_for_each_sg(cmd, sg, use_sg, i) {
4685 addr64 = (u64) sg_dma_address(sg);
4686 len = sg_dma_len(sg);
4687 total_len += len;
4688 curr_sg->address = cpu_to_le64(addr64);
4689 curr_sg->length = cpu_to_le32(len);
4690 curr_sg->reserved[0] = 0;
4691 curr_sg->reserved[1] = 0;
4692 curr_sg->reserved[2] = 0;
4693 curr_sg->chain_indicator = 0;
4694 curr_sg++;
4695 }
4696
4697 switch (cmd->sc_data_direction) {
4698 case DMA_TO_DEVICE:
4699 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4700 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4701 break;
4702 case DMA_FROM_DEVICE:
4703 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4704 cp->direction |= IOACCEL2_DIR_DATA_IN;
4705 break;
4706 case DMA_NONE:
4707 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4708 cp->direction |= IOACCEL2_DIR_NO_DATA;
4709 break;
4710 default:
4711 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4712 cmd->sc_data_direction);
4713 BUG();
4714 break;
4715 }
4716 } else {
4717 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4718 cp->direction |= IOACCEL2_DIR_NO_DATA;
4719 }
4720
4721
4722 set_encrypt_ioaccel2(h, c, cp);
4723
4724 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4725 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4726 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4727
4728 cp->data_len = cpu_to_le32(total_len);
4729 cp->err_ptr = cpu_to_le64(c->busaddr +
4730 offsetof(struct io_accel2_cmd, error_data));
4731 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4732
4733
4734 if (use_sg > h->ioaccel_maxsg) {
4735 cp->sg_count = 1;
4736 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4737 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4738 atomic_dec(&phys_disk->ioaccel_cmds_out);
4739 scsi_dma_unmap(cmd);
4740 return -1;
4741 }
4742 } else
4743 cp->sg_count = (u8) use_sg;
4744
4745 enqueue_cmd_and_start_io(h, c);
4746 return 0;
4747}
4748
4749
4750
4751
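/*
 * Throttle against the physical disk's queue depth, then dispatch via
 * the ioaccel1 or ioaccel2 path depending on the controller's
 * transport mode.
 */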
4752static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4753 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4754 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4755{
4756
4757 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4758 phys_disk->queue_depth) {
4759 atomic_dec(&phys_disk->ioaccel_cmds_out);
4760 return IO_ACCEL_INELIGIBLE;
4761 }
4762 if (h->transMethod & CFGTBL_Trans_io_accel1)
4763 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4764 cdb, cdb_len, scsi3addr,
4765 phys_disk);
4766 else
4767 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4768 cdb, cdb_len, scsi3addr,
4769 phys_disk);
4770}
4771
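/*
 * Adjust *map_index so it refers to the copy of the data in the mirror
 * group selected by offload_to_mirror (group 0 when offload_to_mirror
 * is zero).
 */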
4772static void raid_map_helper(struct raid_map_data *map,
4773 int offload_to_mirror, u32 *map_index, u32 *current_group)
4774{
4775 if (offload_to_mirror == 0) {
4776
4777 *map_index %= le16_to_cpu(map->data_disks_per_row);
4778 return;
4779 }
4780 do {
4781
4782 *current_group = *map_index /
4783 le16_to_cpu(map->data_disks_per_row);
4784 if (offload_to_mirror == *current_group)
4785 continue;
4786 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4787
4788 *map_index += le16_to_cpu(map->data_disks_per_row);
4789 (*current_group)++;
4790 } else {
4791
4792 *map_index %= le16_to_cpu(map->data_disks_per_row);
4793 *current_group = 0;
4794 }
4795 } while (offload_to_mirror != *current_group);
4796}
4797
4798
4799
4800
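/*
 * Try to turn a logical-volume READ/WRITE into direct I/O to the
 * underlying physical disk: decode the LBA and block count from the
 * CDB, check that the request does not span rows or stripes, pick a
 * mirror for reads, compute the physical disk handle and block address
 * from the RAID map, rebuild a 10- or 16-byte CDB and queue it on the
 * accelerated path.  Returns IO_ACCEL_INELIGIBLE when the request
 * cannot be mapped.
 */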
4801static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4802 struct CommandList *c)
4803{
4804 struct scsi_cmnd *cmd = c->scsi_cmd;
4805 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4806 struct raid_map_data *map = &dev->raid_map;
4807 struct raid_map_disk_data *dd = &map->data[0];
4808 int is_write = 0;
4809 u32 map_index;
4810 u64 first_block, last_block;
4811 u32 block_cnt;
4812 u32 blocks_per_row;
4813 u64 first_row, last_row;
4814 u32 first_row_offset, last_row_offset;
4815 u32 first_column, last_column;
4816 u64 r0_first_row, r0_last_row;
4817 u32 r5or6_blocks_per_row;
4818 u64 r5or6_first_row, r5or6_last_row;
4819 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4820 u32 r5or6_first_column, r5or6_last_column;
4821 u32 total_disks_per_row;
4822 u32 stripesize;
4823 u32 first_group, last_group, current_group;
4824 u32 map_row;
4825 u32 disk_handle;
4826 u64 disk_block;
4827 u32 disk_block_cnt;
4828 u8 cdb[16];
4829 u8 cdb_len;
4830 u16 strip_size;
4831#if BITS_PER_LONG == 32
4832 u64 tmpdiv;
4833#endif
4834 int offload_to_mirror;
4835
4836
4837 switch (cmd->cmnd[0]) {
4838 case WRITE_6:
4839 is_write = 1;
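		/* fall through */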
4840 case READ_6:
4841 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4842 block_cnt = cmd->cmnd[4];
4843 if (block_cnt == 0)
4844 block_cnt = 256;
4845 break;
4846 case WRITE_10:
4847 is_write = 1;
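		/* fall through */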
4848 case READ_10:
4849 first_block =
4850 (((u64) cmd->cmnd[2]) << 24) |
4851 (((u64) cmd->cmnd[3]) << 16) |
4852 (((u64) cmd->cmnd[4]) << 8) |
4853 cmd->cmnd[5];
4854 block_cnt =
4855 (((u32) cmd->cmnd[7]) << 8) |
4856 cmd->cmnd[8];
4857 break;
4858 case WRITE_12:
4859 is_write = 1;
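		/* fall through */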
4860 case READ_12:
4861 first_block =
4862 (((u64) cmd->cmnd[2]) << 24) |
4863 (((u64) cmd->cmnd[3]) << 16) |
4864 (((u64) cmd->cmnd[4]) << 8) |
4865 cmd->cmnd[5];
4866 block_cnt =
4867 (((u32) cmd->cmnd[6]) << 24) |
4868 (((u32) cmd->cmnd[7]) << 16) |
4869 (((u32) cmd->cmnd[8]) << 8) |
4870 cmd->cmnd[9];
4871 break;
4872 case WRITE_16:
4873 is_write = 1;
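		/* fall through */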
4874 case READ_16:
4875 first_block =
4876 (((u64) cmd->cmnd[2]) << 56) |
4877 (((u64) cmd->cmnd[3]) << 48) |
4878 (((u64) cmd->cmnd[4]) << 40) |
4879 (((u64) cmd->cmnd[5]) << 32) |
4880 (((u64) cmd->cmnd[6]) << 24) |
4881 (((u64) cmd->cmnd[7]) << 16) |
4882 (((u64) cmd->cmnd[8]) << 8) |
4883 cmd->cmnd[9];
4884 block_cnt =
4885 (((u32) cmd->cmnd[10]) << 24) |
4886 (((u32) cmd->cmnd[11]) << 16) |
4887 (((u32) cmd->cmnd[12]) << 8) |
4888 cmd->cmnd[13];
4889 break;
4890 default:
4891 return IO_ACCEL_INELIGIBLE;
4892 }
4893 last_block = first_block + block_cnt - 1;
4894
4895
4896 if (is_write && dev->raid_level != 0)
4897 return IO_ACCEL_INELIGIBLE;
4898
4899
4900 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4901 last_block < first_block)
4902 return IO_ACCEL_INELIGIBLE;
4903
4904
4905 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4906 le16_to_cpu(map->strip_size);
4907 strip_size = le16_to_cpu(map->strip_size);
4908#if BITS_PER_LONG == 32
4909 tmpdiv = first_block;
4910 (void) do_div(tmpdiv, blocks_per_row);
4911 first_row = tmpdiv;
4912 tmpdiv = last_block;
4913 (void) do_div(tmpdiv, blocks_per_row);
4914 last_row = tmpdiv;
4915 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4916 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4917 tmpdiv = first_row_offset;
4918 (void) do_div(tmpdiv, strip_size);
4919 first_column = tmpdiv;
4920 tmpdiv = last_row_offset;
4921 (void) do_div(tmpdiv, strip_size);
4922 last_column = tmpdiv;
4923#else
4924 first_row = first_block / blocks_per_row;
4925 last_row = last_block / blocks_per_row;
4926 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4927 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4928 first_column = first_row_offset / strip_size;
4929 last_column = last_row_offset / strip_size;
4930#endif
4931
4932
4933 if ((first_row != last_row) || (first_column != last_column))
4934 return IO_ACCEL_INELIGIBLE;
4935
4936
4937 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4938 le16_to_cpu(map->metadata_disks_per_row);
4939 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4940 le16_to_cpu(map->row_cnt);
4941 map_index = (map_row * total_disks_per_row) + first_column;
4942
4943 switch (dev->raid_level) {
4944 case HPSA_RAID_0:
4945 break;
4946 case HPSA_RAID_1:
4947
4948
4949
4950
4951 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4952 if (dev->offload_to_mirror)
4953 map_index += le16_to_cpu(map->data_disks_per_row);
4954 dev->offload_to_mirror = !dev->offload_to_mirror;
4955 break;
4956 case HPSA_RAID_ADM:
4957
4958
4959
4960 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4961
4962 offload_to_mirror = dev->offload_to_mirror;
4963 raid_map_helper(map, offload_to_mirror,
4964 &map_index, &current_group);
4965
4966 offload_to_mirror =
4967 (offload_to_mirror >=
4968 le16_to_cpu(map->layout_map_count) - 1)
4969 ? 0 : offload_to_mirror + 1;
4970 dev->offload_to_mirror = offload_to_mirror;
4971
4972
4973
4974
4975 break;
4976 case HPSA_RAID_5:
4977 case HPSA_RAID_6:
4978 if (le16_to_cpu(map->layout_map_count) <= 1)
4979 break;
4980
4981
4982 r5or6_blocks_per_row =
4983 le16_to_cpu(map->strip_size) *
4984 le16_to_cpu(map->data_disks_per_row);
4985 BUG_ON(r5or6_blocks_per_row == 0);
4986 stripesize = r5or6_blocks_per_row *
4987 le16_to_cpu(map->layout_map_count);
4988#if BITS_PER_LONG == 32
4989 tmpdiv = first_block;
4990 first_group = do_div(tmpdiv, stripesize);
4991 tmpdiv = first_group;
4992 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4993 first_group = tmpdiv;
4994 tmpdiv = last_block;
4995 last_group = do_div(tmpdiv, stripesize);
4996 tmpdiv = last_group;
4997 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4998 last_group = tmpdiv;
4999#else
5000 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5001 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5002#endif
5003 if (first_group != last_group)
5004 return IO_ACCEL_INELIGIBLE;
5005
5006
5007#if BITS_PER_LONG == 32
5008 tmpdiv = first_block;
5009 (void) do_div(tmpdiv, stripesize);
5010 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5011 tmpdiv = last_block;
5012 (void) do_div(tmpdiv, stripesize);
5013 r5or6_last_row = r0_last_row = tmpdiv;
5014#else
5015 first_row = r5or6_first_row = r0_first_row =
5016 first_block / stripesize;
5017 r5or6_last_row = r0_last_row = last_block / stripesize;
5018#endif
5019 if (r5or6_first_row != r5or6_last_row)
5020 return IO_ACCEL_INELIGIBLE;
5021
5022
5023
5024#if BITS_PER_LONG == 32
5025 tmpdiv = first_block;
5026 first_row_offset = do_div(tmpdiv, stripesize);
5027 tmpdiv = first_row_offset;
5028 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5029 r5or6_first_row_offset = first_row_offset;
5030 tmpdiv = last_block;
5031 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5032 tmpdiv = r5or6_last_row_offset;
5033 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5034 tmpdiv = r5or6_first_row_offset;
5035 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5036 first_column = r5or6_first_column = tmpdiv;
5037 tmpdiv = r5or6_last_row_offset;
5038 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5039 r5or6_last_column = tmpdiv;
5040#else
5041 first_row_offset = r5or6_first_row_offset =
5042 (u32)((first_block % stripesize) %
5043 r5or6_blocks_per_row);
5044
5045 r5or6_last_row_offset =
5046 (u32)((last_block % stripesize) %
5047 r5or6_blocks_per_row);
5048
5049 first_column = r5or6_first_column =
5050 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5051 r5or6_last_column =
5052 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5053#endif
5054 if (r5or6_first_column != r5or6_last_column)
5055 return IO_ACCEL_INELIGIBLE;
5056
5057
5058 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5059 le16_to_cpu(map->row_cnt);
5060
5061 map_index = (first_group *
5062 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5063 (map_row * total_disks_per_row) + first_column;
5064 break;
5065 default:
5066 return IO_ACCEL_INELIGIBLE;
5067 }
5068
5069 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5070 return IO_ACCEL_INELIGIBLE;
5071
5072 c->phys_disk = dev->phys_disk[map_index];
5073 if (!c->phys_disk)
5074 return IO_ACCEL_INELIGIBLE;
5075
5076 disk_handle = dd[map_index].ioaccel_handle;
5077 disk_block = le64_to_cpu(map->disk_starting_blk) +
5078 first_row * le16_to_cpu(map->strip_size) +
5079 (first_row_offset - first_column *
5080 le16_to_cpu(map->strip_size));
5081 disk_block_cnt = block_cnt;
5082
5083
5084 if (map->phys_blk_shift) {
5085 disk_block <<= map->phys_blk_shift;
5086 disk_block_cnt <<= map->phys_blk_shift;
5087 }
5088 BUG_ON(disk_block_cnt > 0xffff);
5089
5090
5091 if (disk_block > 0xffffffff) {
5092 cdb[0] = is_write ? WRITE_16 : READ_16;
5093 cdb[1] = 0;
5094 cdb[2] = (u8) (disk_block >> 56);
5095 cdb[3] = (u8) (disk_block >> 48);
5096 cdb[4] = (u8) (disk_block >> 40);
5097 cdb[5] = (u8) (disk_block >> 32);
5098 cdb[6] = (u8) (disk_block >> 24);
5099 cdb[7] = (u8) (disk_block >> 16);
5100 cdb[8] = (u8) (disk_block >> 8);
5101 cdb[9] = (u8) (disk_block);
5102 cdb[10] = (u8) (disk_block_cnt >> 24);
5103 cdb[11] = (u8) (disk_block_cnt >> 16);
5104 cdb[12] = (u8) (disk_block_cnt >> 8);
5105 cdb[13] = (u8) (disk_block_cnt);
5106 cdb[14] = 0;
5107 cdb[15] = 0;
5108 cdb_len = 16;
5109 } else {
5110 cdb[0] = is_write ? WRITE_10 : READ_10;
5111 cdb[1] = 0;
5112 cdb[2] = (u8) (disk_block >> 24);
5113 cdb[3] = (u8) (disk_block >> 16);
5114 cdb[4] = (u8) (disk_block >> 8);
5115 cdb[5] = (u8) (disk_block);
5116 cdb[6] = 0;
5117 cdb[7] = (u8) (disk_block_cnt >> 8);
5118 cdb[8] = (u8) (disk_block_cnt);
5119 cdb[9] = 0;
5120 cdb_len = 10;
5121 }
5122 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5123 dev->scsi3addr,
5124 dev->phys_disk[map_index]);
5125}
5126
5127
5128
5129
5130
5131
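/*
 * Submit a command down the standard RAID path: fill in the CISS
 * request header and CDB, set the data direction, map the
 * scatter-gather list and start I/O.
 */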
5132static int hpsa_ciss_submit(struct ctlr_info *h,
5133 struct CommandList *c, struct scsi_cmnd *cmd,
5134 unsigned char scsi3addr[])
5135{
5136 cmd->host_scribble = (unsigned char *) c;
5137 c->cmd_type = CMD_SCSI;
5138 c->scsi_cmd = cmd;
5139 c->Header.ReplyQueue = 0;
5140 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5141 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5142
5143
5144
5145 c->Request.Timeout = 0;
5146 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5147 c->Request.CDBLen = cmd->cmd_len;
5148 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5149 switch (cmd->sc_data_direction) {
5150 case DMA_TO_DEVICE:
5151 c->Request.type_attr_dir =
5152 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5153 break;
5154 case DMA_FROM_DEVICE:
5155 c->Request.type_attr_dir =
5156 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5157 break;
5158 case DMA_NONE:
5159 c->Request.type_attr_dir =
5160 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5161 break;
5162 case DMA_BIDIRECTIONAL:
5163
5164
5165
5166
5167
5168 c->Request.type_attr_dir =
5169 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5170
5171
5172
5173
5174
5175
5176
5177
5178 break;
5179
5180 default:
5181 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5182 cmd->sc_data_direction);
5183 BUG();
5184 break;
5185 }
5186
5187 if (hpsa_scatter_gather(h, c, cmd) < 0) {
5188 hpsa_cmd_resolve_and_free(h, c);
5189 return SCSI_MLQUEUE_HOST_BUSY;
5190 }
5191 enqueue_cmd_and_start_io(h, c);
5192
5193 return 0;
5194}
5195
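/*
 * Full (one-time) initialization of a command slot: tag, DMA addresses
 * of the command and its error-info buffer, and back pointers.
 */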
5196static void hpsa_cmd_init(struct ctlr_info *h, int index,
5197 struct CommandList *c)
5198{
5199 dma_addr_t cmd_dma_handle, err_dma_handle;
5200
5201
5202 memset(c, 0, offsetof(struct CommandList, refcount));
5203 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5204 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5205 c->err_info = h->errinfo_pool + index;
5206 memset(c->err_info, 0, sizeof(*c->err_info));
5207 err_dma_handle = h->errinfo_pool_dhandle
5208 + index * sizeof(*c->err_info);
5209 c->cmdindex = index;
5210 c->busaddr = (u32) cmd_dma_handle;
5211 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5212 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5213 c->h = h;
5214 c->scsi_cmd = SCSI_CMD_IDLE;
5215}
5216
5217static void hpsa_preinitialize_commands(struct ctlr_info *h)
5218{
5219 int i;
5220
5221 for (i = 0; i < h->nr_cmds; i++) {
5222 struct CommandList *c = h->cmd_pool + i;
5223
5224 hpsa_cmd_init(h, i, c);
5225 atomic_set(&c->refcount, 0);
5226 }
5227}
5228
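/*
 * Lightweight re-initialization used on the fast path: clear the CDB
 * and error info and restore the command's bus address.
 */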
5229static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5230 struct CommandList *c)
5231{
5232 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5233
5234 BUG_ON(c->cmdindex != index);
5235
5236 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5237 memset(c->err_info, 0, sizeof(*c->err_info));
5238 c->busaddr = (u32) cmd_dma_handle;
5239}
5240
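/*
 * Try the accelerated path (RAID-map based for offloaded volumes,
 * direct-map for HBA drives).  Returns 0 on success,
 * SCSI_MLQUEUE_HOST_BUSY on a mapping failure, or IO_ACCEL_INELIGIBLE
 * so the caller can fall back to the standard path.
 */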
5241static int hpsa_ioaccel_submit(struct ctlr_info *h,
5242 struct CommandList *c, struct scsi_cmnd *cmd,
5243 unsigned char *scsi3addr)
5244{
5245 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5246 int rc = IO_ACCEL_INELIGIBLE;
5247
5248 cmd->host_scribble = (unsigned char *) c;
5249
5250 if (dev->offload_enabled) {
5251 hpsa_cmd_init(h, c->cmdindex, c);
5252 c->cmd_type = CMD_SCSI;
5253 c->scsi_cmd = cmd;
5254 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5255 if (rc < 0)
5256 rc = SCSI_MLQUEUE_HOST_BUSY;
5257 } else if (dev->hba_ioaccel_enabled) {
5258 hpsa_cmd_init(h, c->cmdindex, c);
5259 c->cmd_type = CMD_SCSI;
5260 c->scsi_cmd = cmd;
5261 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5262 if (rc < 0)
5263 rc = SCSI_MLQUEUE_HOST_BUSY;
5264 }
5265 return rc;
5266}
5267
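/*
 * Deferred-work retry: resubmit a command, retrying the ioaccel path
 * when the previous attempt reported a full task set, otherwise
 * falling back to the standard RAID path; complete the command with an
 * error when the device is gone or the retry cannot be queued.
 */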
5268static void hpsa_command_resubmit_worker(struct work_struct *work)
5269{
5270 struct scsi_cmnd *cmd;
5271 struct hpsa_scsi_dev_t *dev;
5272 struct CommandList *c = container_of(work, struct CommandList, work);
5273
5274 cmd = c->scsi_cmd;
5275 dev = cmd->device->hostdata;
5276 if (!dev) {
5277 cmd->result = DID_NO_CONNECT << 16;
5278 return hpsa_cmd_free_and_done(c->h, c, cmd);
5279 }
5280 if (c->reset_pending)
5281 return hpsa_cmd_resolve_and_free(c->h, c);
5282 if (c->abort_pending)
5283 return hpsa_cmd_abort_and_free(c->h, c, cmd);
5284 if (c->cmd_type == CMD_IOACCEL2) {
5285 struct ctlr_info *h = c->h;
5286 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5287 int rc;
5288
5289 if (c2->error_data.serv_response ==
5290 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5291 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5292 if (rc == 0)
5293 return;
5294 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5295
5296
5297
5298
5299
5300 cmd->result = DID_IMM_RETRY << 16;
5301 return hpsa_cmd_free_and_done(h, c, cmd);
5302 }
5303
5304 }
5305 }
5306 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5307 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5308
5309
5310
5311
5312
5313
5314
5315
5316 cmd->result = DID_IMM_RETRY << 16;
5317 cmd->scsi_done(cmd);
5318 }
5319}
5320
5321
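/*
 * SCSI midlayer queuecommand entry point: look up the target device,
 * fail fast on removal or controller lockup, grab the tag-indexed
 * command slot, try the ioaccel fast path for first-try filesystem
 * requests when the accelerated path is enabled, and otherwise submit
 * via the standard RAID path.
 */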
5322static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5323{
5324 struct ctlr_info *h;
5325 struct hpsa_scsi_dev_t *dev;
5326 unsigned char scsi3addr[8];
5327 struct CommandList *c;
5328 int rc = 0;
5329
5330
5331 h = sdev_to_hba(cmd->device);
5332
5333 BUG_ON(cmd->request->tag < 0);
5334
5335 dev = cmd->device->hostdata;
5336 if (!dev) {
5337 cmd->result = NOT_READY << 16;
5338 cmd->scsi_done(cmd);
5339 return 0;
5340 }
5341
5342 if (dev->removed) {
5343 cmd->result = DID_NO_CONNECT << 16;
5344 cmd->scsi_done(cmd);
5345 return 0;
5346 }
5347
5348 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5349
5350 if (unlikely(lockup_detected(h))) {
5351 cmd->result = DID_NO_CONNECT << 16;
5352 cmd->scsi_done(cmd);
5353 return 0;
5354 }
5355 c = cmd_tagged_alloc(h, cmd);
5356
5357
5358
5359
5360
5361 if (likely(cmd->retries == 0 &&
5362 cmd->request->cmd_type == REQ_TYPE_FS &&
5363 h->acciopath_status)) {
5364 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5365 if (rc == 0)
5366 return 0;
5367 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5368 hpsa_cmd_resolve_and_free(h, c);
5369 return SCSI_MLQUEUE_HOST_BUSY;
5370 }
5371 }
5372 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5373}
5374
5375static void hpsa_scan_complete(struct ctlr_info *h)
5376{
5377 unsigned long flags;
5378
5379 spin_lock_irqsave(&h->scan_lock, flags);
5380 h->scan_finished = 1;
5381 wake_up_all(&h->scan_wait_queue);
5382 spin_unlock_irqrestore(&h->scan_lock, flags);
5383}
5384
5385static void hpsa_scan_start(struct Scsi_Host *sh)
5386{
5387 struct ctlr_info *h = shost_to_hba(sh);
5388 unsigned long flags;
5389
5390
5391
5392
5393
5394
5395
5396 if (unlikely(lockup_detected(h)))
5397 return hpsa_scan_complete(h);
5398
5399
5400 while (1) {
5401 spin_lock_irqsave(&h->scan_lock, flags);
5402 if (h->scan_finished)
5403 break;
5404 spin_unlock_irqrestore(&h->scan_lock, flags);
5405 wait_event(h->scan_wait_queue, h->scan_finished);
5406
5407
5408
5409
5410
5411 }
5412 h->scan_finished = 0;
5413 spin_unlock_irqrestore(&h->scan_lock, flags);
5414
5415 if (unlikely(lockup_detected(h)))
5416 return hpsa_scan_complete(h);
5417
5418 hpsa_update_scsi_devices(h);
5419
5420 hpsa_scan_complete(h);
5421}
5422
5423static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5424{
5425 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5426
5427 if (!logical_drive)
5428 return -ENODEV;
5429
5430 if (qdepth < 1)
5431 qdepth = 1;
5432 else if (qdepth > logical_drive->queue_depth)
5433 qdepth = logical_drive->queue_depth;
5434
5435 return scsi_change_queue_depth(sdev, qdepth);
5436}
5437
5438static int hpsa_scan_finished(struct Scsi_Host *sh,
5439 unsigned long elapsed_time)
5440{
5441 struct ctlr_info *h = shost_to_hba(sh);
5442 unsigned long flags;
5443 int finished;
5444
5445 spin_lock_irqsave(&h->scan_lock, flags);
5446 finished = h->scan_finished;
5447 spin_unlock_irqrestore(&h->scan_lock, flags);
5448 return finished;
5449}
5450
5451static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5452{
5453 struct Scsi_Host *sh;
5454
5455 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5456 if (sh == NULL) {
5457 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5458 return -ENOMEM;
5459 }
5460
5461 sh->io_port = 0;
5462 sh->n_io_port = 0;
5463 sh->this_id = -1;
5464 sh->max_channel = 3;
5465 sh->max_cmd_len = MAX_COMMAND_SIZE;
5466 sh->max_lun = HPSA_MAX_LUN;
5467 sh->max_id = HPSA_MAX_LUN;
5468 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5469 sh->cmd_per_lun = sh->can_queue;
5470 sh->sg_tablesize = h->maxsgentries;
5471 sh->transportt = hpsa_sas_transport_template;
5472 sh->hostdata[0] = (unsigned long) h;
5473 sh->irq = h->intr[h->intr_mode];
5474 sh->unique_id = sh->irq;
5475
5476 h->scsi_host = sh;
5477 return 0;
5478}
5479
5480static int hpsa_scsi_add_host(struct ctlr_info *h)
5481{
5482 int rv;
5483
5484 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5485 if (rv) {
5486 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5487 return rv;
5488 }
5489 scsi_scan_host(h->scsi_host);
5490 return 0;
5491}
5492
5493
5494
5495
5496
5497
5498
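/*
 * Map a scsi command's block-layer tag to an index in the command
 * pool, offset past the slots reserved for driver-internal commands.
 */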
5499static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5500{
5501 int idx = scmd->request->tag;
5502
5503 if (idx < 0)
5504 return idx;
5505
5506
5507 return idx + HPSA_NRESERVED_CMDS;
5508}
5509
5510
5511
5512
5513
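/*
 * Send a TEST UNIT READY to the given lun address on one reply queue.
 * Returns 0 if the device reports ready (or only benign sense data),
 * nonzero otherwise.
 */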
5514static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5515 struct CommandList *c, unsigned char lunaddr[],
5516 int reply_queue)
5517{
5518 int rc;
5519
5520
5521 (void) fill_cmd(c, TEST_UNIT_READY, h,
5522 NULL, 0, 0, lunaddr, TYPE_CMD);
5523 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5524 if (rc)
5525 return rc;
5526
5527
5528
5529 if (c->err_info->CommandStatus == CMD_SUCCESS)
5530 return 0;
5531
5532
5533
5534
5535
5536
5537 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5538 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5539 (c->err_info->SenseInfo[2] == NO_SENSE ||
5540 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5541 return 0;
5542
5543 return 1;
5544}
5545
5546
5547
5548
5549
5550static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5551 struct CommandList *c,
5552 unsigned char lunaddr[], int reply_queue)
5553{
5554 int rc;
5555 int count = 0;
5556 int waittime = 1;
5557
5558
5559 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5560
5561
5562
5563
5564
5565 msleep(1000 * waittime);
5566
5567 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5568 if (!rc)
5569 break;
5570
5571
5572 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5573 waittime *= 2;
5574
5575 dev_warn(&h->pdev->dev,
5576 "waiting %d secs for device to become ready.\n",
5577 waittime);
5578 }
5579
5580 return rc;
5581}
5582
5583static int wait_for_device_to_become_ready(struct ctlr_info *h,
5584 unsigned char lunaddr[],
5585 int reply_queue)
5586{
5587 int first_queue;
5588 int last_queue;
5589 int rq;
5590 int rc = 0;
5591 struct CommandList *c;
5592
5593 c = cmd_alloc(h);
5594
5595
5596
5597
5598
5599
5600 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5601 first_queue = 0;
5602 last_queue = h->nreply_queues - 1;
5603 } else {
5604 first_queue = reply_queue;
5605 last_queue = reply_queue;
5606 }
5607
5608 for (rq = first_queue; rq <= last_queue; rq++) {
5609 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5610 if (rc)
5611 break;
5612 }
5613
5614 if (rc)
5615 dev_warn(&h->pdev->dev, "giving up on device.\n");
5616 else
5617 dev_warn(&h->pdev->dev, "device is ready.\n");
5618
5619 cmd_free(h, c);
5620 return rc;
5621}
5622
5623
5624
5625
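/*
 * SCSI error-handler reset callback: validate the device, bail out on
 * controller lockup, then issue a logical or physical target reset and
 * report SUCCESS or FAILED to the midlayer.
 */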
5626static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5627{
5628 int rc;
5629 struct ctlr_info *h;
5630 struct hpsa_scsi_dev_t *dev;
5631 u8 reset_type;
5632 char msg[48];
5633
5634
5635 h = sdev_to_hba(scsicmd->device);
5636 if (h == NULL)
5637 return FAILED;
5638
5639 if (lockup_detected(h))
5640 return FAILED;
5641
5642 dev = scsicmd->device->hostdata;
5643 if (!dev) {
5644 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5645 return FAILED;
5646 }
5647
5648
5649 if (lockup_detected(h)) {
5650 snprintf(msg, sizeof(msg),
5651 "cmd %d RESET FAILED, lockup detected",
5652 hpsa_get_cmd_index(scsicmd));
5653 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5654 return FAILED;
5655 }
5656
5657
5658 if (detect_controller_lockup(h)) {
5659 snprintf(msg, sizeof(msg),
5660 "cmd %d RESET FAILED, new lockup detected",
5661 hpsa_get_cmd_index(scsicmd));
5662 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5663 return FAILED;
5664 }
5665
5666
5667 if (is_hba_lunid(dev->scsi3addr))
5668 return SUCCESS;
5669
5670 if (is_logical_dev_addr_mode(dev->scsi3addr))
5671 reset_type = HPSA_DEVICE_RESET_MSG;
5672 else
5673 reset_type = HPSA_PHYS_TARGET_RESET;
5674
5675 sprintf(msg, "resetting %s",
5676 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5677 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5678
5679 h->reset_in_progress = 1;
5680
5681
5682 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5683 DEFAULT_REPLY_QUEUE);
5684 sprintf(msg, "reset %s %s",
5685 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5686 rc == 0 ? "completed successfully" : "failed");
5687 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5688 h->reset_in_progress = 0;
5689 return rc == 0 ? SUCCESS : FAILED;
5690}
5691
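/*
 * Reverse the byte order within each 32-bit half of the 8-byte tag,
 * for controllers that expect swizzled tags in abort messages.
 */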
5692static void swizzle_abort_tag(u8 *tag)
5693{
5694 u8 original_tag[8];
5695
5696 memcpy(original_tag, tag, 8);
5697 tag[0] = original_tag[3];
5698 tag[1] = original_tag[2];
5699 tag[2] = original_tag[1];
5700 tag[3] = original_tag[0];
5701 tag[4] = original_tag[7];
5702 tag[5] = original_tag[6];
5703 tag[6] = original_tag[5];
5704 tag[7] = original_tag[4];
5705}
5706
5707static void hpsa_get_tag(struct ctlr_info *h,
5708 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5709{
5710 u64 tag;
5711 if (c->cmd_type == CMD_IOACCEL1) {
5712 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5713 &h->ioaccel_cmd_pool[c->cmdindex];
5714 tag = le64_to_cpu(cm1->tag);
5715 *tagupper = cpu_to_le32(tag >> 32);
5716 *taglower = cpu_to_le32(tag);
5717 return;
5718 }
5719 if (c->cmd_type == CMD_IOACCEL2) {
5720 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5721 &h->ioaccel2_cmd_pool[c->cmdindex];
5722
5723 memset(tagupper, 0, sizeof(*tagupper));
5724 *taglower = cm2->Tag;
5725 return;
5726 }
5727 tag = le64_to_cpu(c->Header.tag);
5728 *tagupper = cpu_to_le32(tag >> 32);
5729 *taglower = cpu_to_le32(tag);
5730}
5731
5732static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5733 struct CommandList *abort, int reply_queue)
5734{
5735 int rc = IO_OK;
5736 struct CommandList *c;
5737 struct ErrorInfo *ei;
5738 __le32 tagupper, taglower;
5739
5740 c = cmd_alloc(h);
5741
5742
5743 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5744 0, 0, scsi3addr, TYPE_MSG);
5745 if (h->needs_abort_tags_swizzled)
5746 swizzle_abort_tag(&c->Request.CDB[4]);
5747 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5748 hpsa_get_tag(h, abort, &taglower, &tagupper);
5749 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5750 __func__, tagupper, taglower);
5751
5752
5753 ei = c->err_info;
5754 switch (ei->CommandStatus) {
5755 case CMD_SUCCESS:
5756 break;
5757 case CMD_TMF_STATUS:
5758 rc = hpsa_evaluate_tmf_status(h, c);
5759 break;
5760 case CMD_UNABORTABLE:
5761 rc = -1;
5762 break;
5763 default:
5764 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5765 __func__, tagupper, taglower);
5766 hpsa_scsi_interpret_error(h, c);
5767 rc = -1;
5768 break;
5769 }
5770 cmd_free(h, c);
5771 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5772 __func__, tagupper, taglower);
5773 return rc;
5774}
5775
5776static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5777 struct CommandList *command_to_abort, int reply_queue)
5778{
5779 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5780 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5781 struct io_accel2_cmd *c2a =
5782 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5783 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5784 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5785
5786
5787
5788
5789
5790
5791 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5792 sizeof(struct io_accel2_cmd));
5793 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5794 offsetof(struct hpsa_tmf_struct, error_len) +
5795 sizeof(ac->error_len));
5796
5797 c->cmd_type = IOACCEL2_TMF;
5798 c->scsi_cmd = SCSI_CMD_BUSY;
5799
5800
5801 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5802 (c->cmdindex * sizeof(struct io_accel2_cmd));
5803 BUG_ON(c->busaddr & 0x0000007F);
5804
5805 memset(ac, 0, sizeof(*c2));
5806 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5807 ac->reply_queue = reply_queue;
5808 ac->tmf = IOACCEL2_TMF_ABORT;
5809 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5810 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5811 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5812 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5813 ac->error_ptr = cpu_to_le64(c->busaddr +
5814 offsetof(struct io_accel2_cmd, error_data));
5815 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5816}
5817
5818
5819
5820
5821
5822
5823
5824
5825static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5826 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5827{
5828 int rc = IO_OK;
5829 struct scsi_cmnd *scmd;
5830 struct hpsa_scsi_dev_t *dev;
5831 unsigned char phys_scsi3addr[8];
5832 unsigned char *psa = &phys_scsi3addr[0];
5833
5834
5835 scmd = abort->scsi_cmd;
5836 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5837 if (dev == NULL) {
5838 dev_warn(&h->pdev->dev,
5839 "Cannot abort: no device pointer for command.\n");
5840 return -1;
5841 }
5842
5843 if (h->raid_offload_debug > 0)
5844 dev_info(&h->pdev->dev,
5845 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5846 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5847 "Reset as abort",
5848 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5849 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5850
5851 if (!dev->offload_enabled) {
5852 dev_warn(&h->pdev->dev,
5853 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5854 return -1;
5855 }
5856
5857
5858 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5859 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5860 return -1;
5861 }
5862
5863
5864 if (h->raid_offload_debug > 0)
5865 dev_info(&h->pdev->dev,
5866 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5867 psa[0], psa[1], psa[2], psa[3],
5868 psa[4], psa[5], psa[6], psa[7]);
5869 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5870 if (rc != 0) {
5871 dev_warn(&h->pdev->dev,
5872 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5873 psa[0], psa[1], psa[2], psa[3],
5874 psa[4], psa[5], psa[6], psa[7]);
5875 return rc;
5876 }
5877
5878
5879 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5880 dev_warn(&h->pdev->dev,
5881 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5882 psa[0], psa[1], psa[2], psa[3],
5883 psa[4], psa[5], psa[6], psa[7]);
5884 return -1;
5885 }
5886
5887
5888 dev_info(&h->pdev->dev,
5889 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5890 psa[0], psa[1], psa[2], psa[3],
5891 psa[4], psa[5], psa[6], psa[7]);
5892
5893 return rc;
5894}
5895
5896static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5897 struct CommandList *abort, int reply_queue)
5898{
5899 int rc = IO_OK;
5900 struct CommandList *c;
5901 __le32 taglower, tagupper;
5902 struct hpsa_scsi_dev_t *dev;
5903 struct io_accel2_cmd *c2;
5904
5905 dev = abort->scsi_cmd->device->hostdata;
5906 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5907 return -1;
5908
5909 c = cmd_alloc(h);
5910 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5911 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5912 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5913 hpsa_get_tag(h, abort, &taglower, &tagupper);
5914 dev_dbg(&h->pdev->dev,
5915 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5916 __func__, tagupper, taglower);
5917
5918
5919 dev_dbg(&h->pdev->dev,
5920 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5921 __func__, tagupper, taglower, c2->error_data.serv_response);
5922 switch (c2->error_data.serv_response) {
5923 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5924 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5925 rc = 0;
5926 break;
5927 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5928 case IOACCEL2_SERV_RESPONSE_FAILURE:
5929 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5930 rc = -1;
5931 break;
5932 default:
5933 dev_warn(&h->pdev->dev,
5934 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5935 __func__, tagupper, taglower,
5936 c2->error_data.serv_response);
5937 rc = -1;
5938 }
5939 cmd_free(h, c);
5940 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5941 tagupper, taglower);
5942 return rc;
5943}
5944
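/*
 * Pick the right abort mechanism: ioaccel2 commands get a native
 * ioaccel2 task abort when the controller supports it or the device is
 * physical, otherwise the underlying physical drive is reset as a
 * substitute; all other commands get a regular CISS abort message.
 */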
5945static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5946 struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
5947{
5948
5949
5950
5951
5952
5953
5954 if (abort->cmd_type == CMD_IOACCEL2) {
5955 if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
5956 dev->physical_device)
5957 return hpsa_send_abort_ioaccel2(h, abort,
5958 reply_queue);
5959 else
5960 return hpsa_send_reset_as_abort_ioaccel2(h,
5961 dev->scsi3addr,
5962 abort, reply_queue);
5963 }
5964 return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
5965}
5966
5967
5968static int hpsa_extract_reply_queue(struct ctlr_info *h,
5969 struct CommandList *c)
5970{
5971 if (c->cmd_type == CMD_IOACCEL2)
5972 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5973 return c->Header.ReplyQueue;
5974}
5975
5976
5977
5978
5979
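/*
 * Wait up to ABORT_CMD_WAIT_MSECS for one of the reserved abort
 * command slots to become available; returns nonzero on timeout.
 */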
5980static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5981{
5982#define ABORT_CMD_WAIT_MSECS 5000
5983 return !wait_event_timeout(h->abort_cmd_wait_queue,
5984 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5985 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5986}
5987
5988
5989
5990
5991
5992static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5993{
5994
5995 int rc;
5996 struct ctlr_info *h;
5997 struct hpsa_scsi_dev_t *dev;
5998 struct CommandList *abort;
5999 struct scsi_cmnd *as;
6000 char msg[256];
6001 int ml = 0;
6002 __le32 tagupper, taglower;
6003 int refcount, reply_queue;
6004
6005 if (sc == NULL)
6006 return FAILED;
6007
6008 if (sc->device == NULL)
6009 return FAILED;
6010
6011
6012 h = sdev_to_hba(sc->device);
6013 if (h == NULL)
6014 return FAILED;
6015
6016
6017 dev = sc->device->hostdata;
6018 if (!dev) {
6019 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
6020 __func__);
6021 return FAILED;
6022 }
6023
6024
6025 if (lockup_detected(h)) {
6026 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6027 "ABORT FAILED, lockup detected");
6028 return FAILED;
6029 }
6030
6031
6032 if (detect_controller_lockup(h)) {
6033 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6034 "ABORT FAILED, new lockup detected");
6035 return FAILED;
6036 }
6037
6038
6039 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
6040 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6041 return FAILED;
6042
6043 memset(msg, 0, sizeof(msg));
6044 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
6045 h->scsi_host->host_no, sc->device->channel,
6046 sc->device->id, sc->device->lun,
6047 "Aborting command", sc);
6048
6049
6050 abort = (struct CommandList *) sc->host_scribble;
6051 if (abort == NULL) {
6052
6053 return SUCCESS;
6054 }
6055 refcount = atomic_inc_return(&abort->refcount);
6056 if (refcount == 1) {
6057 cmd_free(h, abort);
6058 return SUCCESS;
6059 }
6060
6061
6062 if (abort->cmd_type != CMD_IOACCEL2 &&
6063 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
6064 cmd_free(h, abort);
6065 return FAILED;
6066 }
6067
6068
6069
6070
6071
6072 if (abort->scsi_cmd != sc) {
6073 cmd_free(h, abort);
6074 return SUCCESS;
6075 }
6076
6077 abort->abort_pending = true;
6078 hpsa_get_tag(h, abort, &taglower, &tagupper);
6079 reply_queue = hpsa_extract_reply_queue(h, abort);
6080 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
6081 as = abort->scsi_cmd;
6082 if (as != NULL)
6083 ml += sprintf(msg+ml,
6084 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
6085 as->cmd_len, as->cmnd[0], as->cmnd[1],
6086 as->serial_number);
6087 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
6088 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
6089
6090
6091
6092
6093
6094
6095 if (wait_for_available_abort_cmd(h)) {
6096 dev_warn(&h->pdev->dev,
6097 "%s FAILED, timeout waiting for an abort command to become available.\n",
6098 msg);
6099 cmd_free(h, abort);
6100 return FAILED;
6101 }
6102 rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
6103 atomic_inc(&h->abort_cmds_available);
6104 wake_up_all(&h->abort_cmd_wait_queue);
6105 if (rc != 0) {
6106 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
6107 hpsa_show_dev_msg(KERN_WARNING, h, dev,
6108 "FAILED to abort command");
6109 cmd_free(h, abort);
6110 return FAILED;
6111 }
6112 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
6113 wait_event(h->event_sync_wait_queue,
6114 abort->scsi_cmd != sc || lockup_detected(h));
6115 cmd_free(h, abort);
6116 return !lockup_detected(h) ? SUCCESS : FAILED;
6117}
6118
6119
6120
6121
6122
6123
6124
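/*
 * Get the command slot corresponding to the scsi command's block-layer
 * tag.  The tag must fall in the non-reserved range; a busy slot
 * indicates a tag collision and is logged.
 */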
6125static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6126 struct scsi_cmnd *scmd)
6127{
6128 int idx = hpsa_get_cmd_index(scmd);
6129 struct CommandList *c = h->cmd_pool + idx;
6130
6131 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6132 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6133 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6134
6135
6136
6137 BUG();
6138 }
6139
6140 atomic_inc(&c->refcount);
6141 if (unlikely(!hpsa_is_cmd_idle(c))) {
6142
6143
6144
6145
6146
6147
6148 dev_err(&h->pdev->dev,
6149 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
6150 idx);
6151 if (c->scsi_cmd != NULL)
6152 scsi_print_command(c->scsi_cmd);
6153 scsi_print_command(scmd);
6154 }
6155
6156 hpsa_cmd_partial_init(h, idx, c);
6157 return c;
6158}
6159
6160static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6161{
6162
6163
6164
6165
6166
6167
6168 (void)atomic_dec(&c->refcount);
6169}
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
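/*
 * Allocate one of the command slots reserved for driver-internal use,
 * using the allocation bitmap and the per-command refcount, retrying
 * until a free slot is found.
 */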
6180static struct CommandList *cmd_alloc(struct ctlr_info *h)
6181{
6182 struct CommandList *c;
6183 int refcount, i;
6184 int offset = 0;
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205 for (;;) {
6206 i = find_next_zero_bit(h->cmd_pool_bits,
6207 HPSA_NRESERVED_CMDS,
6208 offset);
6209 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6210 offset = 0;
6211 continue;
6212 }
6213 c = h->cmd_pool + i;
6214 refcount = atomic_inc_return(&c->refcount);
6215 if (unlikely(refcount > 1)) {
6216 cmd_free(h, c);
6217 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6218 continue;
6219 }
6220 set_bit(i & (BITS_PER_LONG - 1),
6221 h->cmd_pool_bits + (i / BITS_PER_LONG));
6222 break;
6223 }
6224 hpsa_cmd_partial_init(h, i, c);
6225 return c;
6226}
6227
6228
6229
6230
6231
6232
6233
6234static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6235{
6236 if (atomic_dec_and_test(&c->refcount)) {
6237 int i;
6238
6239 i = c - h->cmd_pool;
6240 clear_bit(i & (BITS_PER_LONG - 1),
6241 h->cmd_pool_bits + (i / BITS_PER_LONG));
6242 }
6243}
6244
6245#ifdef CONFIG_COMPAT
6246
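/*
 * 32-bit compat handler for CCISS_PASSTHRU: convert the 32-bit ioctl
 * structure to its 64-bit form in compat user space and forward it to
 * hpsa_ioctl().
 */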
6247static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6248 void __user *arg)
6249{
6250 IOCTL32_Command_struct __user *arg32 =
6251 (IOCTL32_Command_struct __user *) arg;
6252 IOCTL_Command_struct arg64;
6253 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6254 int err;
6255 u32 cp;
6256
6257 memset(&arg64, 0, sizeof(arg64));
6258 err = 0;
6259 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6260 sizeof(arg64.LUN_info));
6261 err |= copy_from_user(&arg64.Request, &arg32->Request,
6262 sizeof(arg64.Request));
6263 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6264 sizeof(arg64.error_info));
6265 err |= get_user(arg64.buf_size, &arg32->buf_size);
6266 err |= get_user(cp, &arg32->buf);
6267 arg64.buf = compat_ptr(cp);
6268 err |= copy_to_user(p, &arg64, sizeof(arg64));
6269
6270 if (err)
6271 return -EFAULT;
6272
6273 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6274 if (err)
6275 return err;
6276 err |= copy_in_user(&arg32->error_info, &p->error_info,
6277 sizeof(arg32->error_info));
6278 if (err)
6279 return -EFAULT;
6280 return err;
6281}
6282
6283static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6284 int cmd, void __user *arg)
6285{
6286 BIG_IOCTL32_Command_struct __user *arg32 =
6287 (BIG_IOCTL32_Command_struct __user *) arg;
6288 BIG_IOCTL_Command_struct arg64;
6289 BIG_IOCTL_Command_struct __user *p =
6290 compat_alloc_user_space(sizeof(arg64));
6291 int err;
6292 u32 cp;
6293
6294 memset(&arg64, 0, sizeof(arg64));
6295 err = 0;
6296 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6297 sizeof(arg64.LUN_info));
6298 err |= copy_from_user(&arg64.Request, &arg32->Request,
6299 sizeof(arg64.Request));
6300 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6301 sizeof(arg64.error_info));
6302 err |= get_user(arg64.buf_size, &arg32->buf_size);
6303 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6304 err |= get_user(cp, &arg32->buf);
6305 arg64.buf = compat_ptr(cp);
6306 err |= copy_to_user(p, &arg64, sizeof(arg64));
6307
6308 if (err)
6309 return -EFAULT;
6310
6311 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6312 if (err)
6313 return err;
6314 err |= copy_in_user(&arg32->error_info, &p->error_info,
6315 sizeof(arg32->error_info));
6316 if (err)
6317 return -EFAULT;
6318 return err;
6319}
6320
6321static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6322{
6323 switch (cmd) {
6324 case CCISS_GETPCIINFO:
6325 case CCISS_GETINTINFO:
6326 case CCISS_SETINTINFO:
6327 case CCISS_GETNODENAME:
6328 case CCISS_SETNODENAME:
6329 case CCISS_GETHEARTBEAT:
6330 case CCISS_GETBUSTYPES:
6331 case CCISS_GETFIRMVER:
6332 case CCISS_GETDRIVVER:
6333 case CCISS_REVALIDVOLS:
6334 case CCISS_DEREGDISK:
6335 case CCISS_REGNEWDISK:
6336 case CCISS_REGNEWD:
6337 case CCISS_RESCANDISK:
6338 case CCISS_GETLUNINFO:
6339 return hpsa_ioctl(dev, cmd, arg);
6340
6341 case CCISS_PASSTHRU32:
6342 return hpsa_ioctl32_passthru(dev, cmd, arg);
6343 case CCISS_BIG_PASSTHRU32:
6344 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6345
6346 default:
6347 return -ENOIOCTLCMD;
6348 }
6349}
6350#endif
6351
6352static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6353{
6354 struct hpsa_pci_info pciinfo;
6355
6356 if (!argp)
6357 return -EINVAL;
6358 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6359 pciinfo.bus = h->pdev->bus->number;
6360 pciinfo.dev_fn = h->pdev->devfn;
6361 pciinfo.board_id = h->board_id;
6362 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6363 return -EFAULT;
6364 return 0;
6365}
6366
6367static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6368{
6369 DriverVer_type DriverVer;
6370 unsigned char vmaj, vmin, vsubmin;
6371 int rc;
6372
6373 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6374 &vmaj, &vmin, &vsubmin);
6375 if (rc != 3) {
6376 dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.\n",
6377 HPSA_DRIVER_VERSION);
6378 vmaj = 0;
6379 vmin = 0;
6380 vsubmin = 0;
6381 }
6382 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6383 if (!argp)
6384 return -EINVAL;
6385 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6386 return -EFAULT;
6387 return 0;
6388}
6389
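/*
 * CCISS_PASSTHRU: send a single command from userspace to the controller,
 * using at most one kernel bounce buffer for the data phase, and copy the
 * error information (and any read data) back to the caller.
 */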
6390static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6391{
6392 IOCTL_Command_struct iocommand;
6393 struct CommandList *c;
6394 char *buff = NULL;
6395 u64 temp64;
6396 int rc = 0;
6397
6398 if (!argp)
6399 return -EINVAL;
6400 if (!capable(CAP_SYS_RAWIO))
6401 return -EPERM;
6402 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6403 return -EFAULT;
6404 if ((iocommand.buf_size < 1) &&
6405 (iocommand.Request.Type.Direction != XFER_NONE)) {
6406 return -EINVAL;
6407 }
6408 if (iocommand.buf_size > 0) {
6409 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6410 if (buff == NULL)
6411 return -ENOMEM;
6412 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6413
6414 if (copy_from_user(buff, iocommand.buf,
6415 iocommand.buf_size)) {
6416 rc = -EFAULT;
6417 goto out_kfree;
6418 }
6419 } else {
6420 memset(buff, 0, iocommand.buf_size);
6421 }
6422 }
6423 c = cmd_alloc(h);
6424
6425
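/* Fill in the command type and header. */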
6426 c->cmd_type = CMD_IOCTL_PEND;
6427 c->scsi_cmd = SCSI_CMD_BUSY;
6428
6429 c->Header.ReplyQueue = 0;
6430 if (iocommand.buf_size > 0) {
6431 c->Header.SGList = 1;
6432 c->Header.SGTotal = cpu_to_le16(1);
6433 } else {
6434 c->Header.SGList = 0;
6435 c->Header.SGTotal = cpu_to_le16(0);
6436 }
6437 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6438
6439
6440 memcpy(&c->Request, &iocommand.Request,
6441 sizeof(c->Request));
6442
6443
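/* Map the bounce buffer for DMA as a single SG element. */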
6444 if (iocommand.buf_size > 0) {
6445 temp64 = pci_map_single(h->pdev, buff,
6446 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6447 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6448 c->SG[0].Addr = cpu_to_le64(0);
6449 c->SG[0].Len = cpu_to_le32(0);
6450 rc = -ENOMEM;
6451 goto out;
6452 }
6453 c->SG[0].Addr = cpu_to_le64(temp64);
6454 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6455 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
6456 }
6457 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6458 DEFAULT_TIMEOUT);
6459 if (iocommand.buf_size > 0)
6460 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6461 check_ioctl_unit_attention(h, c);
6462 if (rc) {
6463 rc = -EIO;
6464 goto out;
6465 }
6466
6467
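/* Copy the controller's error information back to userspace. */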
6468 memcpy(&iocommand.error_info, c->err_info,
6469 sizeof(iocommand.error_info));
6470 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6471 rc = -EFAULT;
6472 goto out;
6473 }
6474 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6475 iocommand.buf_size > 0) {
6476
6477 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6478 rc = -EFAULT;
6479 goto out;
6480 }
6481 }
6482out:
6483 cmd_free(h, c);
6484out_kfree:
6485 kfree(buff);
6486 return rc;
6487}
6488
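/*
 * CCISS_BIG_PASSTHRU: like hpsa_passthru_ioctl(), but the transfer may be
 * larger than a single kmalloc'able buffer, so it is split across up to
 * SG_ENTRIES_IN_CMD kernel buffers of at most ioc->malloc_size bytes each.
 */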
6489static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6490{
6491 BIG_IOCTL_Command_struct *ioc;
6492 struct CommandList *c;
6493 unsigned char **buff = NULL;
6494 int *buff_size = NULL;
6495 u64 temp64;
6496 BYTE sg_used = 0;
6497 int status = 0;
6498 u32 left;
6499 u32 sz;
6500 BYTE __user *data_ptr;
6501
6502 if (!argp)
6503 return -EINVAL;
6504 if (!capable(CAP_SYS_RAWIO))
6505 return -EPERM;
6506 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6508 if (!ioc) {
6509 status = -ENOMEM;
6510 goto cleanup1;
6511 }
6512 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6513 status = -EFAULT;
6514 goto cleanup1;
6515 }
6516 if ((ioc->buf_size < 1) &&
6517 (ioc->Request.Type.Direction != XFER_NONE)) {
6518 status = -EINVAL;
6519 goto cleanup1;
6520 }
6521
6522 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6523 status = -EINVAL;
6524 goto cleanup1;
6525 }
6526 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6527 status = -EINVAL;
6528 goto cleanup1;
6529 }
6530 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6531 if (!buff) {
6532 status = -ENOMEM;
6533 goto cleanup1;
6534 }
6535 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6536 if (!buff_size) {
6537 status = -ENOMEM;
6538 goto cleanup1;
6539 }
6540 left = ioc->buf_size;
6541 data_ptr = ioc->buf;
6542 while (left) {
6543 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6544 buff_size[sg_used] = sz;
6545 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6546 if (buff[sg_used] == NULL) {
6547 status = -ENOMEM;
6548 goto cleanup1;
6549 }
6550 if (ioc->Request.Type.Direction & XFER_WRITE) {
6551 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6552 status = -EFAULT;
6553 goto cleanup1;
6554 }
6555 } else
6556 memset(buff[sg_used], 0, sz);
6557 left -= sz;
6558 data_ptr += sz;
6559 sg_used++;
6560 }
6561 c = cmd_alloc(h);
6562
6563 c->cmd_type = CMD_IOCTL_PEND;
6564 c->scsi_cmd = SCSI_CMD_BUSY;
6565 c->Header.ReplyQueue = 0;
6566 c->Header.SGList = (u8) sg_used;
6567 c->Header.SGTotal = cpu_to_le16(sg_used);
6568 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6569 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6570 if (ioc->buf_size > 0) {
6571 int i;
6572 for (i = 0; i < sg_used; i++) {
6573 temp64 = pci_map_single(h->pdev, buff[i],
6574 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6575 if (dma_mapping_error(&h->pdev->dev,
6576 (dma_addr_t) temp64)) {
6577 c->SG[i].Addr = cpu_to_le64(0);
6578 c->SG[i].Len = cpu_to_le32(0);
6579 hpsa_pci_unmap(h->pdev, c, i,
6580 PCI_DMA_BIDIRECTIONAL);
6581 status = -ENOMEM;
6582 goto cleanup0;
6583 }
6584 c->SG[i].Addr = cpu_to_le64(temp64);
6585 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6586 c->SG[i].Ext = cpu_to_le32(0);
6587 }
6588 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6589 }
6590 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6591 DEFAULT_TIMEOUT);
6592 if (sg_used)
6593 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6594 check_ioctl_unit_attention(h, c);
6595 if (status) {
6596 status = -EIO;
6597 goto cleanup0;
6598 }
6599
6600
6601 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6602 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6603 status = -EFAULT;
6604 goto cleanup0;
6605 }
6606 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6607 int i;
6608
6609
6610 BYTE __user *ptr = ioc->buf;
6611 for (i = 0; i < sg_used; i++) {
6612 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6613 status = -EFAULT;
6614 goto cleanup0;
6615 }
6616 ptr += buff_size[i];
6617 }
6618 }
6619 status = 0;
6620cleanup0:
6621 cmd_free(h, c);
6622cleanup1:
6623 if (buff) {
6624 int i;
6625
6626 for (i = 0; i < sg_used; i++)
6627 kfree(buff[i]);
6628 kfree(buff);
6629 }
6630 kfree(buff_size);
6631 kfree(ioc);
6632 return status;
6633}
6634
6635static void check_ioctl_unit_attention(struct ctlr_info *h,
6636 struct CommandList *c)
6637{
6638 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6639 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6640 (void) check_for_unit_attention(h, c);
6641}
6642
6643
6644
6645
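/*
 * Main ioctl entry point.  Passthru ioctls are throttled via
 * h->passthru_cmds_avail so that only a limited number can be
 * outstanding at any one time.
 */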
6646static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6647{
6648 struct ctlr_info *h;
6649 void __user *argp = (void __user *)arg;
6650 int rc;
6651
6652 h = sdev_to_hba(dev);
6653
6654 switch (cmd) {
6655 case CCISS_DEREGDISK:
6656 case CCISS_REGNEWDISK:
6657 case CCISS_REGNEWD:
6658 hpsa_scan_start(h->scsi_host);
6659 return 0;
6660 case CCISS_GETPCIINFO:
6661 return hpsa_getpciinfo_ioctl(h, argp);
6662 case CCISS_GETDRIVVER:
6663 return hpsa_getdrivver_ioctl(h, argp);
6664 case CCISS_PASSTHRU:
6665 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6666 return -EAGAIN;
6667 rc = hpsa_passthru_ioctl(h, argp);
6668 atomic_inc(&h->passthru_cmds_avail);
6669 return rc;
6670 case CCISS_BIG_PASSTHRU:
6671 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6672 return -EAGAIN;
6673 rc = hpsa_big_passthru_ioctl(h, argp);
6674 atomic_inc(&h->passthru_cmds_avail);
6675 return rc;
6676 default:
6677 return -ENOTTY;
6678 }
6679}
6680
6681static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6682 u8 reset_type)
6683{
6684 struct CommandList *c;
6685
6686 c = cmd_alloc(h);
6687
6688
6689 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6690 RAID_CTLR_LUNID, TYPE_MSG);
6691 c->Request.CDB[1] = reset_type;
6692 c->waiting = NULL;
6693 enqueue_cmd_and_start_io(h, c);
6694
6695
6696
6697
6698 return;
6699}
6700
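/*
 * fill_cmd - build a controller command (TYPE_CMD) or message (TYPE_MSG)
 * in *c and DMA-map the optional data buffer.  Returns 0 on success or
 * -1 if the buffer could not be mapped.
 */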
6701static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6702 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6703 int cmd_type)
6704{
6705 int pci_dir = XFER_NONE;
6706 u64 tag;
6707
6708 c->cmd_type = CMD_IOCTL_PEND;
6709 c->scsi_cmd = SCSI_CMD_BUSY;
6710 c->Header.ReplyQueue = 0;
6711 if (buff != NULL && size > 0) {
6712 c->Header.SGList = 1;
6713 c->Header.SGTotal = cpu_to_le16(1);
6714 } else {
6715 c->Header.SGList = 0;
6716 c->Header.SGTotal = cpu_to_le16(0);
6717 }
6718 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6719
6720 if (cmd_type == TYPE_CMD) {
6721 switch (cmd) {
6722 case HPSA_INQUIRY:
6723
6724 if (page_code & VPD_PAGE) {
6725 c->Request.CDB[1] = 0x01;
6726 c->Request.CDB[2] = (page_code & 0xff);
6727 }
6728 c->Request.CDBLen = 6;
6729 c->Request.type_attr_dir =
6730 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6731 c->Request.Timeout = 0;
6732 c->Request.CDB[0] = HPSA_INQUIRY;
6733 c->Request.CDB[4] = size & 0xFF;
6734 break;
6735 case HPSA_REPORT_LOG:
6736 case HPSA_REPORT_PHYS:
6737
6738
6739
6740 c->Request.CDBLen = 12;
6741 c->Request.type_attr_dir =
6742 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6743 c->Request.Timeout = 0;
6744 c->Request.CDB[0] = cmd;
6745 c->Request.CDB[6] = (size >> 24) & 0xFF;
6746 c->Request.CDB[7] = (size >> 16) & 0xFF;
6747 c->Request.CDB[8] = (size >> 8) & 0xFF;
6748 c->Request.CDB[9] = size & 0xFF;
6749 break;
6750 case BMIC_SENSE_DIAG_OPTIONS:
6751 c->Request.CDBLen = 16;
6752 c->Request.type_attr_dir =
6753 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6754 c->Request.Timeout = 0;
6755
6756 c->Request.CDB[0] = BMIC_READ;
6757 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6758 break;
6759 case BMIC_SET_DIAG_OPTIONS:
6760 c->Request.CDBLen = 16;
6761 c->Request.type_attr_dir =
6762 TYPE_ATTR_DIR(cmd_type,
6763 ATTR_SIMPLE, XFER_WRITE);
6764 c->Request.Timeout = 0;
6765 c->Request.CDB[0] = BMIC_WRITE;
6766 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6767 break;
6768 case HPSA_CACHE_FLUSH:
6769 c->Request.CDBLen = 12;
6770 c->Request.type_attr_dir =
6771 TYPE_ATTR_DIR(cmd_type,
6772 ATTR_SIMPLE, XFER_WRITE);
6773 c->Request.Timeout = 0;
6774 c->Request.CDB[0] = BMIC_WRITE;
6775 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6776 c->Request.CDB[7] = (size >> 8) & 0xFF;
6777 c->Request.CDB[8] = size & 0xFF;
6778 break;
6779 case TEST_UNIT_READY:
6780 c->Request.CDBLen = 6;
6781 c->Request.type_attr_dir =
6782 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6783 c->Request.Timeout = 0;
6784 break;
6785 case HPSA_GET_RAID_MAP:
6786 c->Request.CDBLen = 12;
6787 c->Request.type_attr_dir =
6788 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6789 c->Request.Timeout = 0;
6790 c->Request.CDB[0] = HPSA_CISS_READ;
6791 c->Request.CDB[1] = cmd;
6792 c->Request.CDB[6] = (size >> 24) & 0xFF;
6793 c->Request.CDB[7] = (size >> 16) & 0xFF;
6794 c->Request.CDB[8] = (size >> 8) & 0xFF;
6795 c->Request.CDB[9] = size & 0xFF;
6796 break;
6797 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6798 c->Request.CDBLen = 10;
6799 c->Request.type_attr_dir =
6800 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6801 c->Request.Timeout = 0;
6802 c->Request.CDB[0] = BMIC_READ;
6803 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6804 c->Request.CDB[7] = (size >> 16) & 0xFF;
6805 c->Request.CDB[8] = (size >> 8) & 0xFF;
6806 break;
6807 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6808 c->Request.CDBLen = 10;
6809 c->Request.type_attr_dir =
6810 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6811 c->Request.Timeout = 0;
6812 c->Request.CDB[0] = BMIC_READ;
6813 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6814 c->Request.CDB[7] = (size >> 16) & 0xFF;
6815 c->Request.CDB[8] = (size >> 8) & 0xFF;
6816 break;
6817 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6818 c->Request.CDBLen = 10;
6819 c->Request.type_attr_dir =
6820 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6821 c->Request.Timeout = 0;
6822 c->Request.CDB[0] = BMIC_READ;
6823 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6824 c->Request.CDB[7] = (size >> 16) & 0xFF;
6825 c->Request.CDB[8] = (size >> 8) & 0xFF;
6826 break;
6827 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6828 c->Request.CDBLen = 10;
6829 c->Request.type_attr_dir =
6830 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6831 c->Request.Timeout = 0;
6832 c->Request.CDB[0] = BMIC_READ;
6833 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6834 c->Request.CDB[7] = (size >> 16) & 0xFF;
6835 c->Request.CDB[8] = (size >> 8) & 0xFF;
6836 break;
6837 case BMIC_IDENTIFY_CONTROLLER:
6838 c->Request.CDBLen = 10;
6839 c->Request.type_attr_dir =
6840 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6841 c->Request.Timeout = 0;
6842 c->Request.CDB[0] = BMIC_READ;
6843 c->Request.CDB[1] = 0;
6844 c->Request.CDB[2] = 0;
6845 c->Request.CDB[3] = 0;
6846 c->Request.CDB[4] = 0;
6847 c->Request.CDB[5] = 0;
6848 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6849 c->Request.CDB[7] = (size >> 16) & 0xFF;
6850 c->Request.CDB[8] = (size >> 8) & 0xFF;
6851 c->Request.CDB[9] = 0;
6852 break;
6853 default:
6854 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6855 BUG();
6856 return -1;
6857 }
6858 } else if (cmd_type == TYPE_MSG) {
6859 switch (cmd) {
6860
6861 case HPSA_PHYS_TARGET_RESET:
6862 c->Request.CDBLen = 16;
6863 c->Request.type_attr_dir =
6864 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6865 c->Request.Timeout = 0;
6866 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6867 c->Request.CDB[0] = HPSA_RESET;
6868 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6869
6870 c->Request.CDB[4] = 0x00;
6871 c->Request.CDB[5] = 0x00;
6872 c->Request.CDB[6] = 0x00;
6873 c->Request.CDB[7] = 0x00;
6874 break;
6875 case HPSA_DEVICE_RESET_MSG:
6876 c->Request.CDBLen = 16;
6877 c->Request.type_attr_dir =
6878 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6879 c->Request.Timeout = 0;
6880 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6881 c->Request.CDB[0] = cmd;
6882 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6883
6884
6885 c->Request.CDB[4] = 0x00;
6886 c->Request.CDB[5] = 0x00;
6887 c->Request.CDB[6] = 0x00;
6888 c->Request.CDB[7] = 0x00;
6889 break;
6890 case HPSA_ABORT_MSG:
6891 memcpy(&tag, buff, sizeof(tag));
6892 dev_dbg(&h->pdev->dev,
6893 "Abort Tag:0x%016llx using rqst Tag:0x%016llx\n",
6894 tag, c->Header.tag);
6895 c->Request.CDBLen = 16;
6896 c->Request.type_attr_dir =
6897 TYPE_ATTR_DIR(cmd_type,
6898 ATTR_SIMPLE, XFER_WRITE);
6899 c->Request.Timeout = 0;
6900 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6901 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6902 c->Request.CDB[2] = 0x00;
6903 c->Request.CDB[3] = 0x00;
6904
6905 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6906 c->Request.CDB[12] = 0x00;
6907 c->Request.CDB[13] = 0x00;
6908 c->Request.CDB[14] = 0x00;
6909 c->Request.CDB[15] = 0x00;
6910 break;
6911 default:
6912 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6913 cmd);
6914 BUG();
6915 }
6916 } else {
6917 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6918 BUG();
6919 }
6920
6921 switch (GET_DIR(c->Request.type_attr_dir)) {
6922 case XFER_READ:
6923 pci_dir = PCI_DMA_FROMDEVICE;
6924 break;
6925 case XFER_WRITE:
6926 pci_dir = PCI_DMA_TODEVICE;
6927 break;
6928 case XFER_NONE:
6929 pci_dir = PCI_DMA_NONE;
6930 break;
6931 default:
6932 pci_dir = PCI_DMA_BIDIRECTIONAL;
6933 }
6934 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6935 return -1;
6936 return 0;
6937}
6938
6939
6940
6941
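/*
 * ioremap a PCI memory range whose base address need not be page
 * aligned; the returned pointer is adjusted back to the requested offset.
 */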
6942static void __iomem *remap_pci_mem(ulong base, ulong size)
6943{
6944 ulong page_base = ((ulong) base) & PAGE_MASK;
6945 ulong page_offs = ((ulong) base) - page_base;
6946 void __iomem *page_remapped = ioremap_nocache(page_base,
6947 page_offs + size);
6948
6949 return page_remapped ? (page_remapped + page_offs) : NULL;
6950}
6951
6952static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6953{
6954 return h->access.command_completed(h, q);
6955}
6956
6957static inline bool interrupt_pending(struct ctlr_info *h)
6958{
6959 return h->access.intr_pending(h);
6960}
6961
6962static inline long interrupt_not_for_us(struct ctlr_info *h)
6963{
6964 return (h->access.intr_pending(h) == 0) ||
6965 (h->interrupts_enabled == 0);
6966}
6967
6968static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6969 u32 raw_tag)
6970{
6971 if (unlikely(tag_index >= h->nr_cmds)) {
6972 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6973 return 1;
6974 }
6975 return 0;
6976}
6977
6978static inline void finish_cmd(struct CommandList *c)
6979{
6980 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6981 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6982 || c->cmd_type == CMD_IOACCEL2))
6983 complete_scsi_command(c);
6984 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6985 complete(c->waiting);
6986}
6987
6988
6989static inline void process_indexed_cmd(struct ctlr_info *h,
6990 u32 raw_tag)
6991{
6992 u32 tag_index;
6993 struct CommandList *c;
6994
6995 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6996 if (!bad_tag(h, tag_index, raw_tag)) {
6997 c = h->cmd_pool + tag_index;
6998 finish_cmd(c);
6999 }
7000}
7001
7002
7003
7004
7005
7006
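/*
 * During a kdump-triggered reset (reset_devices) some controllers raise
 * interrupts even while interrupts are disabled (a known firmware bug).
 * Such interrupts are simply ignored.
 */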
7007static int ignore_bogus_interrupt(struct ctlr_info *h)
7008{
7009 if (likely(!reset_devices))
7010 return 0;
7011
7012 if (likely(h->interrupts_enabled))
7013 return 0;
7014
7015 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
7016 "(known firmware bug.) Ignoring.\n");
7017
7018 return 1;
7019}
7020
7021
7022
7023
7024
7025
7026static struct ctlr_info *queue_to_hba(u8 *queue)
7027{
7028 return container_of((queue - *queue), struct ctlr_info, q[0]);
7029}
7030
7031static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7032{
7033 struct ctlr_info *h = queue_to_hba(queue);
7034 u8 q = *(u8 *) queue;
7035 u32 raw_tag;
7036
7037 if (ignore_bogus_interrupt(h))
7038 return IRQ_NONE;
7039
7040 if (interrupt_not_for_us(h))
7041 return IRQ_NONE;
7042 h->last_intr_timestamp = get_jiffies_64();
7043 while (interrupt_pending(h)) {
7044 raw_tag = get_next_completion(h, q);
7045 while (raw_tag != FIFO_EMPTY)
7046 raw_tag = next_command(h, q);
7047 }
7048 return IRQ_HANDLED;
7049}
7050
7051static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7052{
7053 struct ctlr_info *h = queue_to_hba(queue);
7054 u32 raw_tag;
7055 u8 q = *(u8 *) queue;
7056
7057 if (ignore_bogus_interrupt(h))
7058 return IRQ_NONE;
7059
7060 h->last_intr_timestamp = get_jiffies_64();
7061 raw_tag = get_next_completion(h, q);
7062 while (raw_tag != FIFO_EMPTY)
7063 raw_tag = next_command(h, q);
7064 return IRQ_HANDLED;
7065}
7066
7067static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7068{
7069 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7070 u32 raw_tag;
7071 u8 q = *(u8 *) queue;
7072
7073 if (interrupt_not_for_us(h))
7074 return IRQ_NONE;
7075 h->last_intr_timestamp = get_jiffies_64();
7076 while (interrupt_pending(h)) {
7077 raw_tag = get_next_completion(h, q);
7078 while (raw_tag != FIFO_EMPTY) {
7079 process_indexed_cmd(h, raw_tag);
7080 raw_tag = next_command(h, q);
7081 }
7082 }
7083 return IRQ_HANDLED;
7084}
7085
7086static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7087{
7088 struct ctlr_info *h = queue_to_hba(queue);
7089 u32 raw_tag;
7090 u8 q = *(u8 *) queue;
7091
7092 h->last_intr_timestamp = get_jiffies_64();
7093 raw_tag = get_next_completion(h, q);
7094 while (raw_tag != FIFO_EMPTY) {
7095 process_indexed_cmd(h, raw_tag);
7096 raw_tag = next_command(h, q);
7097 }
7098 return IRQ_HANDLED;
7099}
7100
7101
7102
7103
7104
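/*
 * hpsa_message - send a TYPE_MSG command (such as the post-reset no-op)
 * by driving the hardware registers directly, for use before the normal
 * command machinery has been set up.
 */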
7105static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7106 unsigned char type)
7107{
7108 struct Command {
7109 struct CommandListHeader CommandHeader;
7110 struct RequestBlock Request;
7111 struct ErrDescriptor ErrorDescriptor;
7112 };
7113 struct Command *cmd;
7114 static const size_t cmd_sz = sizeof(*cmd) +
7115 sizeof(cmd->ErrorDescriptor);
7116 dma_addr_t paddr64;
7117 __le32 paddr32;
7118 u32 tag;
7119 void __iomem *vaddr;
7120 int i, err;
7121
7122 vaddr = pci_ioremap_bar(pdev, 0);
7123 if (vaddr == NULL)
7124 return -ENOMEM;
7125
7126
7127
7128
7129
7130 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
7131 if (err) {
7132 iounmap(vaddr);
7133 return err;
7134 }
7135
7136 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7137 if (cmd == NULL) {
7138 iounmap(vaddr);
7139 return -ENOMEM;
7140 }
7141
7142
7143
7144
7145
7146 paddr32 = cpu_to_le32(paddr64);
7147
7148 cmd->CommandHeader.ReplyQueue = 0;
7149 cmd->CommandHeader.SGList = 0;
7150 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7151 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7152 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7153
7154 cmd->Request.CDBLen = 16;
7155 cmd->Request.type_attr_dir =
7156 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7157 cmd->Request.Timeout = 0;
7158 cmd->Request.CDB[0] = opcode;
7159 cmd->Request.CDB[1] = type;
7160 memset(&cmd->Request.CDB[2], 0, 14);
7161 cmd->ErrorDescriptor.Addr =
7162 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7163 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7164
7165 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7166
7167 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7168 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7169 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7170 break;
7171 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7172 }
7173
7174 iounmap(vaddr);
7175
7176
7177
7178
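/*
 * The DMA buffer is not freed on the timeout path, since the controller
 * could still complete the command later and write into it.
 */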
7179 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7180 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7181 opcode, type);
7182 return -ETIMEDOUT;
7183 }
7184
7185 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7186
7187 if (tag & HPSA_ERROR_BIT) {
7188 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7189 opcode, type);
7190 return -EIO;
7191 }
7192
7193 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7194 opcode, type);
7195 return 0;
7196}
7197
7198#define hpsa_noop(p) hpsa_message(p, 3, 0)
7199
7200static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7201 void __iomem *vaddr, u32 use_doorbell)
7202{
7203
7204 if (use_doorbell) {
7205
7206
7207
7208
7209 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7210 writel(use_doorbell, vaddr + SA5_DOORBELL);
7211
7212
7213
7214
7215
7216
7217 msleep(10000);
7218 } else {
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228 int rc = 0;
7229
7230 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7231
7232
7233 rc = pci_set_power_state(pdev, PCI_D3hot);
7234 if (rc)
7235 return rc;
7236
7237 msleep(500);
7238
7239
7240 rc = pci_set_power_state(pdev, PCI_D0);
7241 if (rc)
7242 return rc;
7243
7244
7245
7246
7247
7248
7249 msleep(500);
7250 }
7251 return 0;
7252}
7253
7254static void init_driver_version(char *driver_version, int len)
7255{
7256 memset(driver_version, 0, len);
7257 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7258}
7259
7260static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7261{
7262 char *driver_version;
7263 int i, size = sizeof(cfgtable->driver_version);
7264
7265 driver_version = kmalloc(size, GFP_KERNEL);
7266 if (!driver_version)
7267 return -ENOMEM;
7268
7269 init_driver_version(driver_version, size);
7270 for (i = 0; i < size; i++)
7271 writeb(driver_version[i], &cfgtable->driver_version[i]);
7272 kfree(driver_version);
7273 return 0;
7274}
7275
7276static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7277 unsigned char *driver_ver)
7278{
7279 int i;
7280
7281 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7282 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7283}
7284
7285static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7286{
7287
7288 char *driver_ver, *old_driver_ver;
7289 int rc, size = sizeof(cfgtable->driver_version);
7290
7291 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7292 if (!old_driver_ver)
7293 return -ENOMEM;
7294 driver_ver = old_driver_ver + size;
7295
7296
7297
7298
7299 init_driver_version(old_driver_ver, size);
7300 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7301 rc = !memcmp(driver_ver, old_driver_ver, size);
7302 kfree(old_driver_ver);
7303 return rc;
7304}
7305
7306
7307
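/*
 * Hard reset the controller for kdump: prefer the doorbell reset methods
 * advertised in misc_fw_support, perform the reset, restore PCI state and
 * wait for the board to become ready again.
 */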
7308static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7309{
7310 u64 cfg_offset;
7311 u32 cfg_base_addr;
7312 u64 cfg_base_addr_index;
7313 void __iomem *vaddr;
7314 unsigned long paddr;
7315 u32 misc_fw_support;
7316 int rc;
7317 struct CfgTable __iomem *cfgtable;
7318 u32 use_doorbell;
7319 u16 command_register;
7320
7321
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331
7332
7333
7334 if (!ctlr_is_resettable(board_id)) {
7335 dev_warn(&pdev->dev, "Controller not resettable\n");
7336 return -ENODEV;
7337 }
7338
7339
7340 if (!ctlr_is_hard_resettable(board_id))
7341 return -ENOTSUPP;
7342
7343
7344 pci_read_config_word(pdev, 4, &command_register);
7345 pci_save_state(pdev);
7346
7347
7348 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7349 if (rc)
7350 return rc;
7351 vaddr = remap_pci_mem(paddr, 0x250);
7352 if (!vaddr)
7353 return -ENOMEM;
7354
7355
7356 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7357 &cfg_base_addr_index, &cfg_offset);
7358 if (rc)
7359 goto unmap_vaddr;
7360 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7361 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7362 if (!cfgtable) {
7363 rc = -ENOMEM;
7364 goto unmap_vaddr;
7365 }
7366 rc = write_driver_ver_to_cfgtable(cfgtable);
7367 if (rc)
7368 goto unmap_cfgtable;
7369
7370
7371
7372
7373 misc_fw_support = readl(&cfgtable->misc_fw_support);
7374 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7375 if (use_doorbell) {
7376 use_doorbell = DOORBELL_CTLR_RESET2;
7377 } else {
7378 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7379 if (use_doorbell) {
7380 dev_warn(&pdev->dev,
7381 "Soft reset not supported. Firmware update is required.\n");
7382 rc = -ENOTSUPP;
7383 goto unmap_cfgtable;
7384 }
7385 }
7386
7387 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7388 if (rc)
7389 goto unmap_cfgtable;
7390
7391 pci_restore_state(pdev);
7392 pci_write_config_word(pdev, 4, command_register);
7393
7394
7395
7396 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7397
7398 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7399 if (rc) {
7400 dev_warn(&pdev->dev,
7401 "Failed waiting for board to become ready after hard reset\n");
7402 goto unmap_cfgtable;
7403 }
7404
7405 rc = controller_reset_failed(cfgtable);
7406 if (rc < 0)
7407 goto unmap_cfgtable;
7408 if (rc) {
7409 dev_warn(&pdev->dev, "Unable to successfully reset "
7410 "controller. Will try soft reset.\n");
7411 rc = -ENOTSUPP;
7412 } else {
7413 dev_info(&pdev->dev, "board ready after hard reset.\n");
7414 }
7415
7416unmap_cfgtable:
7417 iounmap(cfgtable);
7418
7419unmap_vaddr:
7420 iounmap(vaddr);
7421 return rc;
7422}
7423
7424
7425
7426
7427
7428
7429static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7430{
7431#ifdef HPSA_DEBUG
7432 int i;
7433 char temp_name[17];
7434
7435 dev_info(dev, "Controller Configuration information\n");
7436 dev_info(dev, "------------------------------------\n");
7437 for (i = 0; i < 4; i++)
7438 temp_name[i] = readb(&(tb->Signature[i]));
7439 temp_name[4] = '\0';
7440 dev_info(dev, " Signature = %s\n", temp_name);
7441 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7442 dev_info(dev, " Transport methods supported = 0x%x\n",
7443 readl(&(tb->TransportSupport)));
7444 dev_info(dev, " Transport methods active = 0x%x\n",
7445 readl(&(tb->TransportActive)));
7446 dev_info(dev, " Requested transport Method = 0x%x\n",
7447 readl(&(tb->HostWrite.TransportRequest)));
7448 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7449 readl(&(tb->HostWrite.CoalIntDelay)));
7450 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7451 readl(&(tb->HostWrite.CoalIntCount)));
7452 dev_info(dev, " Max outstanding commands = %d\n",
7453 readl(&(tb->CmdsOutMax)));
7454 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7455 for (i = 0; i < 16; i++)
7456 temp_name[i] = readb(&(tb->ServerName[i]));
7457 temp_name[16] = '\0';
7458 dev_info(dev, " Server Name = %s\n", temp_name);
7459 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7460 readl(&(tb->HeartBeat)));
7461#endif
7462}
7463
7464static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7465{
7466 int i, offset, mem_type, bar_type;
7467
7468 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
7469 return 0;
7470 offset = 0;
7471 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7472 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7473 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7474 offset += 4;
7475 else {
7476 mem_type = pci_resource_flags(pdev, i) &
7477 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7478 switch (mem_type) {
7479 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7480 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7481 offset += 4;
7482 break;
7483 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7484 offset += 8;
7485 break;
7486 default:
7487 dev_warn(&pdev->dev,
7488 "base address is invalid\n");
7489 return -1;
7491 }
7492 }
7493 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7494 return i + 1;
7495 }
7496 return -1;
7497}
7498
7499static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7500{
7501 if (h->msix_vector) {
7502 if (h->pdev->msix_enabled)
7503 pci_disable_msix(h->pdev);
7504 h->msix_vector = 0;
7505 } else if (h->msi_vector) {
7506 if (h->pdev->msi_enabled)
7507 pci_disable_msi(h->pdev);
7508 h->msi_vector = 0;
7509 }
7510}
7511
7512
7513
7514
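/*
 * Interrupt mode selection: try MSI-X with one vector per reply queue
 * (capped at the number of online CPUs), fall back to single MSI, and
 * finally to legacy INTx.
 */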
7515static void hpsa_interrupt_mode(struct ctlr_info *h)
7516{
7517#ifdef CONFIG_PCI_MSI
7518 int err, i;
7519 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7520
7521 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7522 hpsa_msix_entries[i].vector = 0;
7523 hpsa_msix_entries[i].entry = i;
7524 }
7525
7526
7527 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7528 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7529 goto default_int_mode;
7530 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7531 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7532 h->msix_vector = MAX_REPLY_QUEUES;
7533 if (h->msix_vector > num_online_cpus())
7534 h->msix_vector = num_online_cpus();
7535 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7536 1, h->msix_vector);
7537 if (err < 0) {
7538 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7539 h->msix_vector = 0;
7540 goto single_msi_mode;
7541 } else if (err < h->msix_vector) {
7542 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7543 "available\n", err);
7544 }
7545 h->msix_vector = err;
7546 for (i = 0; i < h->msix_vector; i++)
7547 h->intr[i] = hpsa_msix_entries[i].vector;
7548 return;
7549 }
7550single_msi_mode:
7551 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7552 dev_info(&h->pdev->dev, "MSI capable controller\n");
7553 if (!pci_enable_msi(h->pdev))
7554 h->msi_vector = 1;
7555 else
7556 dev_warn(&h->pdev->dev, "MSI init failed\n");
7557 }
7558default_int_mode:
7559#endif
7560
7561 h->intr[h->intr_mode] = h->pdev->irq;
7562}
7563
7564static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7565{
7566 int i;
7567 u32 subsystem_vendor_id, subsystem_device_id;
7568
7569 subsystem_vendor_id = pdev->subsystem_vendor;
7570 subsystem_device_id = pdev->subsystem_device;
7571 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7572 subsystem_vendor_id;
7573
7574 for (i = 0; i < ARRAY_SIZE(products); i++)
7575 if (*board_id == products[i].board_id)
7576 return i;
7577
7578 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7579 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7580 !hpsa_allow_any) {
7581 dev_warn(&pdev->dev, "unrecognized board ID: "
7582 "0x%08x, ignoring.\n", *board_id);
7583 return -ENODEV;
7584 }
7585 return ARRAY_SIZE(products) - 1;
7586}
7587
7588static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7589 unsigned long *memory_bar)
7590{
7591 int i;
7592
7593 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7594 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7595
7596 *memory_bar = pci_resource_start(pdev, i);
7597 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7598 *memory_bar);
7599 return 0;
7600 }
7601 dev_warn(&pdev->dev, "no memory BAR found\n");
7602 return -ENODEV;
7603}
7604
7605static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7606 int wait_for_ready)
7607{
7608 int i, iterations;
7609 u32 scratchpad;
7610 if (wait_for_ready)
7611 iterations = HPSA_BOARD_READY_ITERATIONS;
7612 else
7613 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7614
7615 for (i = 0; i < iterations; i++) {
7616 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7617 if (wait_for_ready) {
7618 if (scratchpad == HPSA_FIRMWARE_READY)
7619 return 0;
7620 } else {
7621 if (scratchpad != HPSA_FIRMWARE_READY)
7622 return 0;
7623 }
7624 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7625 }
7626 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7627 return -ENODEV;
7628}
7629
7630static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7631 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7632 u64 *cfg_offset)
7633{
7634 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7635 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7636 *cfg_base_addr &= (u32) 0x0000ffff;
7637 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7638 if (*cfg_base_addr_index == -1) {
7639 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7640 return -ENODEV;
7641 }
7642 return 0;
7643}
7644
7645static void hpsa_free_cfgtables(struct ctlr_info *h)
7646{
7647 if (h->transtable) {
7648 iounmap(h->transtable);
7649 h->transtable = NULL;
7650 }
7651 if (h->cfgtable) {
7652 iounmap(h->cfgtable);
7653 h->cfgtable = NULL;
7654 }
7655}
7656
7657
7658
7659
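/* Map the controller's configuration table and transfer table. */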
7660static int hpsa_find_cfgtables(struct ctlr_info *h)
7661{
7662 u64 cfg_offset;
7663 u32 cfg_base_addr;
7664 u64 cfg_base_addr_index;
7665 u32 trans_offset;
7666 int rc;
7667
7668 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7669 &cfg_base_addr_index, &cfg_offset);
7670 if (rc)
7671 return rc;
7672 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7673 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7674 if (!h->cfgtable) {
7675 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7676 return -ENOMEM;
7677 }
7678 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7679 if (rc)
7680 return rc;
7681
7682 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7683 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7684 cfg_base_addr_index)+cfg_offset+trans_offset,
7685 sizeof(*h->transtable));
7686 if (!h->transtable) {
7687 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7688 hpsa_free_cfgtables(h);
7689 return -ENOMEM;
7690 }
7691 return 0;
7692}
7693
7694static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7695{
7696#define MIN_MAX_COMMANDS 16
7697 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7698
7699 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7700
7701
7702 if (reset_devices && h->max_commands > 32)
7703 h->max_commands = 32;
7704
7705 if (h->max_commands < MIN_MAX_COMMANDS) {
7706 dev_warn(&h->pdev->dev,
7707 "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7708 h->max_commands,
7709 MIN_MAX_COMMANDS);
7710 h->max_commands = MIN_MAX_COMMANDS;
7711 }
7712}
7713
7714
7715
7716
7717
7718static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7719{
7720 return h->maxsgentries > 512;
7721}
7722
7723
7724
7725
7726
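/*
 * Read per-board limits from the config table: maximum command count,
 * scatter-gather limits (with or without chained SG blocks) and the
 * supported task management flags.
 */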
7727static void hpsa_find_board_params(struct ctlr_info *h)
7728{
7729 hpsa_get_max_perf_mode_cmds(h);
7730 h->nr_cmds = h->max_commands;
7731 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7732 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7733 if (hpsa_supports_chained_sg_blocks(h)) {
7734
7735 h->max_cmd_sg_entries = 32;
7736 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7737 h->maxsgentries--;
7738 } else {
7739
7740
7741
7742
7743
7744 h->max_cmd_sg_entries = 31;
7745 h->maxsgentries = 31;
7746 h->chainsize = 0;
7747 }
7748
7749
7750 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7751 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7752 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7753 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7754 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7755 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7756 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7757}
7758
7759static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7760{
7761 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7762 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7763 return false;
7764 }
7765 return true;
7766}
7767
7768static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7769{
7770 u32 driver_support;
7771
7772 driver_support = readl(&(h->cfgtable->driver_support));
7773
7774#ifdef CONFIG_X86
7775 driver_support |= ENABLE_SCSI_PREFETCH;
7776#endif
7777 driver_support |= ENABLE_UNIT_ATTN;
7778 writel(driver_support, &(h->cfgtable->driver_support));
7779}
7780
7781
7782
7783
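/*
 * Quirk for the P600 (board id 0x3225103C): adjust its I2O DMA
 * prefetch configuration.
 */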
7784static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7785{
7786 u32 dma_prefetch;
7787
7788 if (h->board_id != 0x3225103C)
7789 return;
7790 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7791 dma_prefetch |= 0x8000;
7792 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7793}
7794
7795static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7796{
7797 int i;
7798 u32 doorbell_value;
7799 unsigned long flags;
7800
7801 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7802 spin_lock_irqsave(&h->lock, flags);
7803 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7804 spin_unlock_irqrestore(&h->lock, flags);
7805 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7806 goto done;
7807
7808 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7809 }
7810 return -ENODEV;
7811done:
7812 return 0;
7813}
7814
7815static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7816{
7817 int i;
7818 u32 doorbell_value;
7819 unsigned long flags;
7820
7821
7822
7823
7824
7825 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7826 if (h->remove_in_progress)
7827 goto done;
7828 spin_lock_irqsave(&h->lock, flags);
7829 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7830 spin_unlock_irqrestore(&h->lock, flags);
7831 if (!(doorbell_value & CFGTBL_ChangeReq))
7832 goto done;
7833
7834 msleep(MODE_CHANGE_WAIT_INTERVAL);
7835 }
7836 return -ENODEV;
7837done:
7838 return 0;
7839}
7840
7841
7842static int hpsa_enter_simple_mode(struct ctlr_info *h)
7843{
7844 u32 trans_support;
7845
7846 trans_support = readl(&(h->cfgtable->TransportSupport));
7847 if (!(trans_support & SIMPLE_MODE))
7848 return -ENOTSUPP;
7849
7850 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7851
7852
7853 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7854 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7855 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7856 if (hpsa_wait_for_mode_change_ack(h))
7857 goto error;
7858 print_cfg_table(&h->pdev->dev, h->cfgtable);
7859 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7860 goto error;
7861 h->transMethod = CFGTBL_Trans_Simple;
7862 return 0;
7863error:
7864 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7865 return -ENODEV;
7866}
7867
7868
7869static void hpsa_free_pci_init(struct ctlr_info *h)
7870{
7871 hpsa_free_cfgtables(h);
7872 iounmap(h->vaddr);
7873 h->vaddr = NULL;
7874 hpsa_disable_interrupt_mode(h);
7875
7876
7877
7878
7879 pci_disable_device(h->pdev);
7880 pci_release_regions(h->pdev);
7881}
7882
7883
7884static int hpsa_pci_init(struct ctlr_info *h)
7885{
7886 int prod_index, err;
7887
7888 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7889 if (prod_index < 0)
7890 return prod_index;
7891 h->product_name = products[prod_index].product_name;
7892 h->access = *(products[prod_index].access);
7893
7894 h->needs_abort_tags_swizzled =
7895 ctlr_needs_abort_tags_swizzled(h->board_id);
7896
7897 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7898 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7899
7900 err = pci_enable_device(h->pdev);
7901 if (err) {
7902 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7903 pci_disable_device(h->pdev);
7904 return err;
7905 }
7906
7907 err = pci_request_regions(h->pdev, HPSA);
7908 if (err) {
7909 dev_err(&h->pdev->dev,
7910 "failed to obtain PCI resources\n");
7911 pci_disable_device(h->pdev);
7912 return err;
7913 }
7914
7915 pci_set_master(h->pdev);
7916
7917 hpsa_interrupt_mode(h);
7918 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7919 if (err)
7920 goto clean2;
7921 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7922 if (!h->vaddr) {
7923 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7924 err = -ENOMEM;
7925 goto clean2;
7926 }
7927 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7928 if (err)
7929 goto clean3;
7930 err = hpsa_find_cfgtables(h);
7931 if (err)
7932 goto clean3;
7933 hpsa_find_board_params(h);
7934
7935 if (!hpsa_CISS_signature_present(h)) {
7936 err = -ENODEV;
7937 goto clean4;
7938 }
7939 hpsa_set_driver_support_bits(h);
7940 hpsa_p600_dma_prefetch_quirk(h);
7941 err = hpsa_enter_simple_mode(h);
7942 if (err)
7943 goto clean4;
7944 return 0;
7945
7946clean4:
7947 hpsa_free_cfgtables(h);
7948clean3:
7949 iounmap(h->vaddr);
7950 h->vaddr = NULL;
7951clean2:
7952 hpsa_disable_interrupt_mode(h);
7953
7954
7955
7956
7957 pci_disable_device(h->pdev);
7958 pci_release_regions(h->pdev);
7959 return err;
7960}
7961
7962static void hpsa_hba_inquiry(struct ctlr_info *h)
7963{
7964 int rc;
7965
7966#define HBA_INQUIRY_BYTE_COUNT 64
7967 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7968 if (!h->hba_inquiry_data)
7969 return;
7970 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7971 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7972 if (rc != 0) {
7973 kfree(h->hba_inquiry_data);
7974 h->hba_inquiry_data = NULL;
7975 }
7976}
7977
7978static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7979{
7980 int rc, i;
7981 void __iomem *vaddr;
7982
7983 if (!reset_devices)
7984 return 0;
7985
7986
7987
7988
7989
7990 rc = pci_enable_device(pdev);
7991 if (rc) {
7992 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7993 return -ENODEV;
7994 }
7995 pci_disable_device(pdev);
7996 msleep(260);
7997 rc = pci_enable_device(pdev);
7998 if (rc) {
7999 dev_warn(&pdev->dev, "failed to enable device.\n");
8000 return -ENODEV;
8001 }
8002
8003 pci_set_master(pdev);
8004
8005 vaddr = pci_ioremap_bar(pdev, 0);
8006 if (vaddr == NULL) {
8007 rc = -ENOMEM;
8008 goto out_disable;
8009 }
8010 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8011 iounmap(vaddr);
8012
8013
8014 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8015
8016
8017
8018
8019
8020
8021 if (rc)
8022 goto out_disable;
8023
8024
8025 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8026 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8027 if (hpsa_noop(pdev) == 0)
8028 break;
8029 else
8030 dev_warn(&pdev->dev, "no-op failed%s\n",
8031 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
8032 }
8033
8034out_disable:
8035
8036 pci_disable_device(pdev);
8037 return rc;
8038}
8039
8040static void hpsa_free_cmd_pool(struct ctlr_info *h)
8041{
8042 kfree(h->cmd_pool_bits);
8043 h->cmd_pool_bits = NULL;
8044 if (h->cmd_pool) {
8045 pci_free_consistent(h->pdev,
8046 h->nr_cmds * sizeof(struct CommandList),
8047 h->cmd_pool,
8048 h->cmd_pool_dhandle);
8049 h->cmd_pool = NULL;
8050 h->cmd_pool_dhandle = 0;
8051 }
8052 if (h->errinfo_pool) {
8053 pci_free_consistent(h->pdev,
8054 h->nr_cmds * sizeof(struct ErrorInfo),
8055 h->errinfo_pool,
8056 h->errinfo_pool_dhandle);
8057 h->errinfo_pool = NULL;
8058 h->errinfo_pool_dhandle = 0;
8059 }
8060}
8061
8062static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8063{
8064 h->cmd_pool_bits = kzalloc(
8065 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
8066 sizeof(unsigned long), GFP_KERNEL);
8067 h->cmd_pool = pci_alloc_consistent(h->pdev,
8068 h->nr_cmds * sizeof(*h->cmd_pool),
8069 &(h->cmd_pool_dhandle));
8070 h->errinfo_pool = pci_alloc_consistent(h->pdev,
8071 h->nr_cmds * sizeof(*h->errinfo_pool),
8072 &(h->errinfo_pool_dhandle));
8073 if ((h->cmd_pool_bits == NULL)
8074 || (h->cmd_pool == NULL)
8075 || (h->errinfo_pool == NULL)) {
8076 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8077 goto clean_up;
8078 }
8079 hpsa_preinitialize_commands(h);
8080 return 0;
8081clean_up:
8082 hpsa_free_cmd_pool(h);
8083 return -ENOMEM;
8084}
8085
8086static void hpsa_irq_affinity_hints(struct ctlr_info *h)
8087{
8088 int i, cpu;
8089
8090 cpu = cpumask_first(cpu_online_mask);
8091 for (i = 0; i < h->msix_vector; i++) {
8092 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
8093 cpu = cpumask_next(cpu, cpu_online_mask);
8094 }
8095}
8096
8097
8098static void hpsa_free_irqs(struct ctlr_info *h)
8099{
8100 int i;
8101
8102 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
8103
8104 i = h->intr_mode;
8105 irq_set_affinity_hint(h->intr[i], NULL);
8106 free_irq(h->intr[i], &h->q[i]);
8107 h->q[i] = 0;
8108 return;
8109 }
8110
8111 for (i = 0; i < h->msix_vector; i++) {
8112 irq_set_affinity_hint(h->intr[i], NULL);
8113 free_irq(h->intr[i], &h->q[i]);
8114 h->q[i] = 0;
8115 }
8116 for (; i < MAX_REPLY_QUEUES; i++)
8117 h->q[i] = 0;
8118}
8119
8120
8121static int hpsa_request_irqs(struct ctlr_info *h,
8122 irqreturn_t (*msixhandler)(int, void *),
8123 irqreturn_t (*intxhandler)(int, void *))
8124{
8125 int rc, i;
8126
8127
8128
8129
8130
8131 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8132 h->q[i] = (u8) i;
8133
8134 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
8135
8136 for (i = 0; i < h->msix_vector; i++) {
8137 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8138 rc = request_irq(h->intr[i], msixhandler,
8139 0, h->intrname[i],
8140 &h->q[i]);
8141 if (rc) {
8142 int j;
8143
8144 dev_err(&h->pdev->dev,
8145 "failed to get irq %d for %s\n",
8146 h->intr[i], h->devname);
8147 for (j = 0; j < i; j++) {
8148 free_irq(h->intr[j], &h->q[j]);
8149 h->q[j] = 0;
8150 }
8151 for (; j < MAX_REPLY_QUEUES; j++)
8152 h->q[j] = 0;
8153 return rc;
8154 }
8155 }
8156 hpsa_irq_affinity_hints(h);
8157 } else {
8158
8159 if (h->msix_vector > 0 || h->msi_vector) {
8160 if (h->msix_vector)
8161 sprintf(h->intrname[h->intr_mode],
8162 "%s-msix", h->devname);
8163 else
8164 sprintf(h->intrname[h->intr_mode],
8165 "%s-msi", h->devname);
8166 rc = request_irq(h->intr[h->intr_mode],
8167 msixhandler, 0,
8168 h->intrname[h->intr_mode],
8169 &h->q[h->intr_mode]);
8170 } else {
8171 sprintf(h->intrname[h->intr_mode],
8172 "%s-intx", h->devname);
8173 rc = request_irq(h->intr[h->intr_mode],
8174 intxhandler, IRQF_SHARED,
8175 h->intrname[h->intr_mode],
8176 &h->q[h->intr_mode]);
8177 }
8178 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
8179 }
8180 if (rc) {
8181 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8182 h->intr[h->intr_mode], h->devname);
8183 hpsa_free_irqs(h);
8184 return -ENODEV;
8185 }
8186 return 0;
8187}
8188
8189static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8190{
8191 int rc;
8192 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8193
8194 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8195 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8196 if (rc) {
8197 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8198 return rc;
8199 }
8200
8201 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8202 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8203 if (rc) {
8204 dev_warn(&h->pdev->dev, "Board failed to become ready "
8205 "after soft reset.\n");
8206 return rc;
8207 }
8208
8209 return 0;
8210}
8211
8212static void hpsa_free_reply_queues(struct ctlr_info *h)
8213{
8214 int i;
8215
8216 for (i = 0; i < h->nreply_queues; i++) {
8217 if (!h->reply_queue[i].head)
8218 continue;
8219 pci_free_consistent(h->pdev,
8220 h->reply_queue_size,
8221 h->reply_queue[i].head,
8222 h->reply_queue[i].busaddr);
8223 h->reply_queue[i].head = NULL;
8224 h->reply_queue[i].busaddr = 0;
8225 }
8226 h->reply_queue_size = 0;
8227}
8228
8229static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8230{
8231 hpsa_free_performant_mode(h);
8232 hpsa_free_sg_chain_blocks(h);
8233 hpsa_free_cmd_pool(h);
8234 hpsa_free_irqs(h);
8235 scsi_host_put(h->scsi_host);
8236 h->scsi_host = NULL;
8237 hpsa_free_pci_init(h);
8238 free_percpu(h->lockup_detected);
8239 h->lockup_detected = NULL;
8240 if (h->resubmit_wq) {
8241 destroy_workqueue(h->resubmit_wq);
8242 h->resubmit_wq = NULL;
8243 }
8244 if (h->rescan_ctlr_wq) {
8245 destroy_workqueue(h->rescan_ctlr_wq);
8246 h->rescan_ctlr_wq = NULL;
8247 }
8248 kfree(h);
8249}
8250
8251
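/*
 * Called when a controller lockup is detected: fail every command that is
 * still outstanding with CMD_CTLR_LOCKUP status.
 */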
8252static void fail_all_outstanding_cmds(struct ctlr_info *h)
8253{
8254 int i, refcount;
8255 struct CommandList *c;
8256 int failcount = 0;
8257
8258 flush_workqueue(h->resubmit_wq);
8259 for (i = 0; i < h->nr_cmds; i++) {
8260 c = h->cmd_pool + i;
8261 refcount = atomic_inc_return(&c->refcount);
8262 if (refcount > 1) {
8263 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8264 finish_cmd(c);
8265 atomic_dec(&h->commands_outstanding);
8266 failcount++;
8267 }
8268 cmd_free(h, c);
8269 }
8270 dev_warn(&h->pdev->dev,
8271 "failed %d commands in fail_all\n", failcount);
8272}
8273
8274static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8275{
8276 int cpu;
8277
8278 for_each_online_cpu(cpu) {
8279 u32 *lockup_detected;
8280 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8281 *lockup_detected = value;
8282 }
8283 wmb();
8284}
8285
8286static void controller_lockup_detected(struct ctlr_info *h)
8287{
8288 unsigned long flags;
8289 u32 lockup_detected;
8290
8291 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8292 spin_lock_irqsave(&h->lock, flags);
8293 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8294 if (!lockup_detected) {
8295
8296 dev_warn(&h->pdev->dev,
8297 "lockup detected after %d seconds but scratchpad register is zero\n",
8298 h->heartbeat_sample_interval / HZ);
8299 lockup_detected = 0xffffffff;
8300 }
8301 set_lockup_detected_for_all_cpus(h, lockup_detected);
8302 spin_unlock_irqrestore(&h->lock, flags);
8303 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8304 lockup_detected, h->heartbeat_sample_interval / HZ);
8305 pci_disable_device(h->pdev);
8306 fail_all_outstanding_cmds(h);
8307}
8308
8309static int detect_controller_lockup(struct ctlr_info *h)
8310{
8311 u64 now;
8312 u32 heartbeat;
8313 unsigned long flags;
8314
8315 now = get_jiffies_64();
8316
8317 if (time_after64(h->last_intr_timestamp +
8318 (h->heartbeat_sample_interval), now))
8319 return false;
8320
8321
8322
8323
8324
8325
8326 if (time_after64(h->last_heartbeat_timestamp +
8327 (h->heartbeat_sample_interval), now))
8328 return false;
8329
8330
8331 spin_lock_irqsave(&h->lock, flags);
8332 heartbeat = readl(&h->cfgtable->HeartBeat);
8333 spin_unlock_irqrestore(&h->lock, flags);
8334 if (h->last_heartbeat == heartbeat) {
8335 controller_lockup_detected(h);
8336 return true;
8337 }
8338
8339
8340 h->last_heartbeat = heartbeat;
8341 h->last_heartbeat_timestamp = now;
8342 return false;
8343}
8344
8345static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8346{
8347 int i;
8348 char *event_type;
8349
8350 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8351 return;
8352
8353
8354 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8355 | CFGTBL_Trans_io_accel2)) &&
8356 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8357 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8358
8359 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8360 event_type = "state change";
8361 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8362 event_type = "configuration change";
8363
8364 scsi_block_requests(h->scsi_host);
8365 for (i = 0; i < h->ndevices; i++) {
8366 h->dev[i]->offload_enabled = 0;
8367 h->dev[i]->offload_to_be_enabled = 0;
8368 }
8369 hpsa_drain_accel_commands(h);
8370
8371 dev_warn(&h->pdev->dev,
8372 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8373 h->events, event_type);
8374 writel(h->events, &(h->cfgtable->clear_event_notify));
8375
8376 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8377
8378 hpsa_wait_for_clear_event_notify_ack(h);
8379 scsi_unblock_requests(h->scsi_host);
8380 } else {
8381
8382 writel(h->events, &(h->cfgtable->clear_event_notify));
8383 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8384 hpsa_wait_for_clear_event_notify_ack(h);
8385#if 0
8386 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8387 hpsa_wait_for_mode_change_ack(h);
8388#endif
8389 }
8390 return;
8391}
8392
8393
8394
8395
8396
8397
8398static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8399{
8400 if (h->drv_req_rescan) {
8401 h->drv_req_rescan = 0;
8402 return 1;
8403 }
8404
8405 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8406 return 0;
8407
8408 h->events = readl(&(h->cfgtable->event_notify));
8409 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8410}
8411
8412
8413
8414
8415static int hpsa_offline_devices_ready(struct ctlr_info *h)
8416{
8417 unsigned long flags;
8418 struct offline_device_entry *d;
8419 struct list_head *this, *tmp;
8420
8421 spin_lock_irqsave(&h->offline_device_lock, flags);
8422 list_for_each_safe(this, tmp, &h->offline_device_list) {
8423 d = list_entry(this, struct offline_device_entry,
8424 offline_list);
8425 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8426 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8427 spin_lock_irqsave(&h->offline_device_lock, flags);
8428 list_del(&d->offline_list);
8429 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8430 return 1;
8431 }
8432 spin_lock_irqsave(&h->offline_device_lock, flags);
8433 }
8434 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8435 return 0;
8436}
8437
8438static int hpsa_luns_changed(struct ctlr_info *h)
8439{
8440 int rc = 1;
8441 struct ReportLUNdata *logdev = NULL;
8442
8443
8444
8445
8446
8447 if (!h->lastlogicals)
8448 goto out;
8449
8450 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8451 if (!logdev) {
8452 dev_warn(&h->pdev->dev,
8453 "Out of memory, can't track lun changes.\n");
8454 goto out;
8455 }
8456 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8457 dev_warn(&h->pdev->dev,
8458 "report luns failed, can't track lun changes.\n");
8459 goto out;
8460 }
8461 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8462 dev_info(&h->pdev->dev,
8463 "Lun changes detected.\n");
8464 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8465 goto out;
8466 } else
8467 rc = 0;
8468out:
8469 kfree(logdev);
8470 return rc;
8471}
8472
8473static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8474{
8475 unsigned long flags;
8476 struct ctlr_info *h = container_of(to_delayed_work(work),
8477 struct ctlr_info, rescan_ctlr_work);
8478
8479
8480 if (h->remove_in_progress)
8481 return;
8482
8483 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8484 scsi_host_get(h->scsi_host);
8485 hpsa_ack_ctlr_events(h);
8486 hpsa_scan_start(h->scsi_host);
8487 scsi_host_put(h->scsi_host);
8488 } else if (h->discovery_polling) {
8489 hpsa_disable_rld_caching(h);
8490 if (hpsa_luns_changed(h)) {
8491 struct Scsi_Host *sh = NULL;
8492
8493 dev_info(&h->pdev->dev,
8494 "driver discovery polling rescan.\n");
8495 sh = scsi_host_get(h->scsi_host);
8496 if (sh != NULL) {
8497 hpsa_scan_start(sh);
8498 scsi_host_put(sh);
8499 }
8500 }
8501 }
8502 spin_lock_irqsave(&h->lock, flags);
8503 if (!h->remove_in_progress)
8504 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8505 h->heartbeat_sample_interval);
8506 spin_unlock_irqrestore(&h->lock, flags);
8507}
8508
8509static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8510{
8511 unsigned long flags;
8512 struct ctlr_info *h = container_of(to_delayed_work(work),
8513 struct ctlr_info, monitor_ctlr_work);
8514
8515 detect_controller_lockup(h);
8516 if (lockup_detected(h))
8517 return;
8518
8519 spin_lock_irqsave(&h->lock, flags);
8520 if (!h->remove_in_progress)
8521 schedule_delayed_work(&h->monitor_ctlr_work,
8522 h->heartbeat_sample_interval);
8523 spin_unlock_irqrestore(&h->lock, flags);
8524}
8525
8526static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8527 char *name)
8528{
8529 struct workqueue_struct *wq = NULL;
8530
8531 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8532 if (!wq)
8533 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8534
8535 return wq;
8536}
8537
8538static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8539{
8540 int dac, rc;
8541 struct ctlr_info *h;
8542 int try_soft_reset = 0;
8543 unsigned long flags;
8544 u32 board_id;
8545
8546 if (number_of_controllers == 0)
8547 printk(KERN_INFO DRIVER_NAME "\n");
8548
8549 rc = hpsa_lookup_board_id(pdev, &board_id);
8550 if (rc < 0) {
8551 dev_warn(&pdev->dev, "Board ID not found\n");
8552 return rc;
8553 }
8554
8555 rc = hpsa_init_reset_devices(pdev, board_id);
8556 if (rc) {
8557 if (rc != -ENOTSUPP)
8558 return rc;
8559
8560
8561
8562
8563
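		/* A hard reset was not possible (-ENOTSUPP); remember to try
		 * a soft reset later, once the controller is far enough along
		 * in initialization to accept commands.
		 */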
8564 try_soft_reset = 1;
8565 rc = 0;
8566 }
8567
8568reinit_after_soft_reset:
8569
8570
8571
8572
8573
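	/* Command blocks must be aligned on a COMMANDLIST_ALIGNMENT boundary
	 * because the hardware interprets the low bits of each command's
	 * bus address.
	 */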
8574 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8575 h = kzalloc(sizeof(*h), GFP_KERNEL);
8576 if (!h) {
8577 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8578 return -ENOMEM;
8579 }
8580
8581 h->pdev = pdev;
8582
8583 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8584 INIT_LIST_HEAD(&h->offline_device_list);
8585 spin_lock_init(&h->lock);
8586 spin_lock_init(&h->offline_device_lock);
8587 spin_lock_init(&h->scan_lock);
8588 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8589 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8590
8591
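	/* Allocate and clear the per-cpu "lockup detected" flag. */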
8592 h->lockup_detected = alloc_percpu(u32);
8593 if (!h->lockup_detected) {
8594 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8595 rc = -ENOMEM;
8596 goto clean1;
8597 }
8598 set_lockup_detected_for_all_cpus(h, 0);
8599
8600 rc = hpsa_pci_init(h);
8601 if (rc)
8602 goto clean2;
8603
8604
8605
8606 rc = hpsa_scsi_host_alloc(h);
8607 if (rc)
8608 goto clean2_5;
8609
8610 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8611 h->ctlr = number_of_controllers;
8612 number_of_controllers++;
8613
8614
8615 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8616 if (rc == 0) {
8617 dac = 1;
8618 } else {
8619 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8620 if (rc == 0) {
8621 dac = 0;
8622 } else {
8623 dev_err(&pdev->dev, "no suitable DMA available\n");
8624 goto clean3;
8625 }
8626 }
8627
8628
8629 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8630
8631 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8632 if (rc)
8633 goto clean3;
8634 rc = hpsa_alloc_cmd_pool(h);
8635 if (rc)
8636 goto clean4;
8637 rc = hpsa_alloc_sg_chain_blocks(h);
8638 if (rc)
8639 goto clean5;
8640 init_waitqueue_head(&h->scan_wait_queue);
8641 init_waitqueue_head(&h->abort_cmd_wait_queue);
8642 init_waitqueue_head(&h->event_sync_wait_queue);
8643 mutex_init(&h->reset_mutex);
8644 h->scan_finished = 1;
8645
8646 pci_set_drvdata(pdev, h);
8647 h->ndevices = 0;
8648
8649 spin_lock_init(&h->devlock);
8650 rc = hpsa_put_ctlr_into_performant_mode(h);
8651 if (rc)
8652 goto clean6;
8653
8654
8655 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8656 if (!h->rescan_ctlr_wq) {
8657 rc = -ENOMEM;
8658 goto clean7;
8659 }
8660
8661 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8662 if (!h->resubmit_wq) {
8663 rc = -ENOMEM;
8664 goto clean7;
8665 }
8666
8667
8668
8669
8670
8671
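	/* At this point the controller can accept commands.  If the earlier
	 * hard reset was not supported, perform a soft reset now and then
	 * reinitialize from scratch.
	 */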
8672 if (try_soft_reset) {
8673
8674
8675
8676
8677
8678
8679
8680
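		/* We may or may not get a completion from the soft reset
		 * command, and any value it leaves in the fifo may or may not
		 * be valid.  So mask interrupts and swap in handlers that
		 * simply discard stale completions while the reset settles.
		 */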
8681 spin_lock_irqsave(&h->lock, flags);
8682 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8683 spin_unlock_irqrestore(&h->lock, flags);
8684 hpsa_free_irqs(h);
8685 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8686 hpsa_intx_discard_completions);
8687 if (rc) {
8688 dev_warn(&h->pdev->dev,
8689 "Failed to request_irq after soft reset.\n");
8690
8691
8692
8693
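			/* Can't goto clean7 here, or the unwind chain would
			 * reach clean4 and free IRQs that were never
			 * re-acquired; do the clean7..clean5 work by hand.
			 */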
8694 hpsa_free_performant_mode(h);
8695 hpsa_free_sg_chain_blocks(h);
8696 hpsa_free_cmd_pool(h);
8697
8698
8699
8700
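			/* Skip clean4 (hpsa_free_irqs); the IRQs were already
			 * freed above, before the failed re-request.
			 */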
8701 goto clean3;
8702 }
8703
		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked; give up. */
			goto clean7;
8708
8709 dev_info(&h->pdev->dev, "Board READY.\n");
8710 dev_info(&h->pdev->dev,
8711 "Waiting for stale completions to drain.\n");
8712 h->access.set_intr_mask(h, HPSA_INTR_ON);
8713 msleep(10000);
8714 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8715
8716 rc = controller_reset_failed(h->cfgtable);
8717 if (rc)
8718 dev_info(&h->pdev->dev,
8719 "Soft reset appears to have failed.\n");
8720
8721
8722
8723
8724
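		/* The controller has been reset; undo everything allocated so
		 * far and run through the whole initialization again.
		 */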
8725 hpsa_undo_allocations_after_kdump_soft_reset(h);
8726 try_soft_reset = 0;
		if (rc)
			/* Everything was just freed above; nothing to clean up. */
			return -ENODEV;
8730
8731 goto reinit_after_soft_reset;
8732 }
8733
8734
8735 h->acciopath_status = 1;
8736
8737 h->discovery_polling = 0;
8738
8739
8740
8741 h->access.set_intr_mask(h, HPSA_INTR_ON);
8742
8743 hpsa_hba_inquiry(h);
8744
8745 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8746 if (!h->lastlogicals)
8747 dev_info(&h->pdev->dev,
8748 "Can't track change to report lun data\n");
8749
8750
8751 rc = hpsa_scsi_add_host(h);
8752 if (rc)
8753 goto clean7;
8754
8755
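	/* Monitor the controller for firmware lockups and schedule the
	 * periodic rescan worker.
	 */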
8756 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8757 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8758 schedule_delayed_work(&h->monitor_ctlr_work,
8759 h->heartbeat_sample_interval);
8760 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8761 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8762 h->heartbeat_sample_interval);
8763 return 0;
8764
8765clean7:
8766 hpsa_free_performant_mode(h);
8767 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8768clean6:
8769 hpsa_free_sg_chain_blocks(h);
8770clean5:
8771 hpsa_free_cmd_pool(h);
8772clean4:
8773 hpsa_free_irqs(h);
8774clean3:
8775 scsi_host_put(h->scsi_host);
8776 h->scsi_host = NULL;
8777clean2_5:
8778 hpsa_free_pci_init(h);
8779clean2:
8780 if (h->lockup_detected) {
8781 free_percpu(h->lockup_detected);
8782 h->lockup_detected = NULL;
8783 }
8784clean1:
8785 if (h->resubmit_wq) {
8786 destroy_workqueue(h->resubmit_wq);
8787 h->resubmit_wq = NULL;
8788 }
8789 if (h->rescan_ctlr_wq) {
8790 destroy_workqueue(h->rescan_ctlr_wq);
8791 h->rescan_ctlr_wq = NULL;
8792 }
8793 kfree(h);
8794 return rc;
8795}
8796
8797static void hpsa_flush_cache(struct ctlr_info *h)
8798{
8799 char *flush_buf;
8800 struct CommandList *c;
8801 int rc;
8802
8803 if (unlikely(lockup_detected(h)))
8804 return;
8805 flush_buf = kzalloc(4, GFP_KERNEL);
8806 if (!flush_buf)
8807 return;
8808
8809 c = cmd_alloc(h);
8810
	/* Build and send the controller cache-flush command; any failure
	 * is logged but otherwise ignored.
	 */
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if (rc == 0 && c->err_info->CommandStatus == 0)
		goto done;
out:
	dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
done:
	cmd_free(h, c);
	kfree(flush_buf);
8825}
8826
8827
8828
8829
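/* Make the controller gather fresh report lun data on each report luns
 * request, instead of returning cached data.
 */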
8830static void hpsa_disable_rld_caching(struct ctlr_info *h)
8831{
8832 u32 *options;
8833 struct CommandList *c;
8834 int rc;
8835
	/* Don't bother trying to set diag options if the controller is
	 * already locked up.
	 */
	if (unlikely(lockup_detected(h)))
		return;
8839
8840 options = kzalloc(sizeof(*options), GFP_KERNEL);
8841 if (!options) {
		dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching (out of memory).\n");
8844 return;
8845 }
8846
8847 c = cmd_alloc(h);
8848
8849
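	/* First, fetch the controller's current diag options. */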
8850 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8851 RAID_CTLR_LUNID, TYPE_CMD))
8852 goto errout;
8853
8854 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8855 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
8856 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8857 goto errout;
8858
8859
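	/* Second, set the "disable report lun data caching" bit and write
	 * the options back.
	 */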
8860 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8861
8862 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8863 RAID_CTLR_LUNID, TYPE_CMD))
8864 goto errout;
8865
8866 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8867 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
8868 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8869 goto errout;
8870
8871
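	/* Finally, read the options back to verify that the bit stuck. */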
8872 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8873 RAID_CTLR_LUNID, TYPE_CMD))
8874 goto errout;
8875
8876 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8877 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
8878 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8879 goto errout;
8880
8881 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8882 goto out;
8883
8884errout:
8885 dev_err(&h->pdev->dev,
8886 "Error: failed to disable report lun data caching.\n");
8887out:
8888 cmd_free(h, c);
8889 kfree(options);
8890}
8891
8892static void hpsa_shutdown(struct pci_dev *pdev)
8893{
8894 struct ctlr_info *h;
8895
8896 h = pci_get_drvdata(pdev);
8897
8898
8899
8900
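	/* Flush the controller's write cache out to disk so no data is
	 * lost across the shutdown, then quiesce interrupts.
	 */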
8901 hpsa_flush_cache(h);
8902 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8903 hpsa_free_irqs(h);
8904 hpsa_disable_interrupt_mode(h);
8905}
8906
8907static void hpsa_free_device_info(struct ctlr_info *h)
8908{
8909 int i;
8910
8911 for (i = 0; i < h->ndevices; i++) {
8912 kfree(h->dev[i]);
8913 h->dev[i] = NULL;
8914 }
8915}
8916
8917static void hpsa_remove_one(struct pci_dev *pdev)
8918{
8919 struct ctlr_info *h;
8920 unsigned long flags;
8921
8922 if (pci_get_drvdata(pdev) == NULL) {
8923 dev_err(&pdev->dev, "unable to remove device\n");
8924 return;
8925 }
8926 h = pci_get_drvdata(pdev);
8927
8928
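	/* Mark the controller as going away and stop the monitor and
	 * rescan workers before tearing anything down.
	 */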
8929 spin_lock_irqsave(&h->lock, flags);
8930 h->remove_in_progress = 1;
8931 spin_unlock_irqrestore(&h->lock, flags);
8932 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8933 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8934 destroy_workqueue(h->rescan_ctlr_wq);
8935 destroy_workqueue(h->resubmit_wq);
8936
8937
8938
8939
8940
8941
8942
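	/* Unhook from the SCSI midlayer first so no new I/O arrives while
	 * the controller resources are being freed.
	 */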
8943 if (h->scsi_host)
8944 scsi_remove_host(h->scsi_host);
8945
8946
8947 hpsa_shutdown(pdev);
8948
8949 hpsa_free_device_info(h);
8950
8951 kfree(h->hba_inquiry_data);
8952 h->hba_inquiry_data = NULL;
8953 hpsa_free_ioaccel2_sg_chain_blocks(h);
8954 hpsa_free_performant_mode(h);
8955 hpsa_free_sg_chain_blocks(h);
8956 hpsa_free_cmd_pool(h);
8957 kfree(h->lastlogicals);
8958
8959
8960
8961 scsi_host_put(h->scsi_host);
8962 h->scsi_host = NULL;
8963
8964
8965 hpsa_free_pci_init(h);
8966
8967 free_percpu(h->lockup_detected);
8968 h->lockup_detected = NULL;
8969
8970
8971 hpsa_delete_sas_host(h);
8972
8973 kfree(h);
8974}
8975
8976static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8977 __attribute__((unused)) pm_message_t state)
8978{
8979 return -ENOSYS;
8980}
8981
8982static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8983{
8984 return -ENOSYS;
8985}
8986
8987static struct pci_driver hpsa_pci_driver = {
8988 .name = HPSA,
8989 .probe = hpsa_init_one,
8990 .remove = hpsa_remove_one,
8991 .id_table = hpsa_pci_device_id,
8992 .shutdown = hpsa_shutdown,
8993 .suspend = hpsa_suspend,
8994 .resume = hpsa_resume,
8995};
8996
8997
8998
8999
9000
9001
9002
9003
9004
9005
9006
9007
9008
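/*
 * For each possible SG entry count (0..nsgs), find the smallest "bucket"
 * (pre-sized command block, in units of min_blocks) that can hold a command
 * with that many SG entries, and record the bucket index in bucket_map[].
 * The driver uses bucket_map[] to encode, in each command's tag, which
 * pre-programmed size the controller should DMA when fetching that command.
 */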
9009static void calc_bucket_map(int bucket[], int num_buckets,
9010 int nsgs, int min_blocks, u32 *bucket_map)
9011{
9012 int i, j, b, size;
9013
9014
9015 for (i = 0; i <= nsgs; i++) {
9016
9017 size = i + min_blocks;
9018 b = num_buckets;
9019
9020 for (j = 0; j < num_buckets; j++) {
9021 if (bucket[j] >= size) {
9022 b = j;
9023 break;
9024 }
9025 }
9026
9027 bucket_map[i] = b;
9028 }
9029}
9030
9031
9032
9033
9034
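/* Configure the controller's transport method: performant mode, and
 * optionally ioaccel1/ioaccel2, by programming the transport table (block
 * fetch sizes, reply queues) and the requested transport method in the
 * config table, then ringing the doorbell and waiting for the controller
 * to switch modes.
 */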
9035static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9036{
9037 int i;
9038 unsigned long register_value;
9039 unsigned long transMethod = CFGTBL_Trans_Performant |
9040 (trans_support & CFGTBL_Trans_use_short_tags) |
9041 CFGTBL_Trans_enable_directed_msix |
9042 (trans_support & (CFGTBL_Trans_io_accel1 |
9043 CFGTBL_Trans_io_accel2));
9044 struct access_method access = SA5_performant_access;
9045
9046
9047
9048
9049
9050
9051
9052
9053
9054
9055
9056
9057
9058
9059
9060
9061
9062
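	/* bft[] holds the eight command sizes (in 16-byte blocks) written to
	 * the controller's block fetch registers.  Each command's tag encodes
	 * which of these sizes it fits in, so the controller can DMA only as
	 * much of the command as needed.  The smallest command needs 5 blocks;
	 * the largest, SG_ENTRIES_IN_CMD + 4.
	 */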
9063 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9064#define MIN_IOACCEL2_BFT_ENTRY 5
9065#define HPSA_IOACCEL2_HEADER_SZ 4
9066 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9067 13, 14, 15, 16, 17, 18, 19,
9068 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9069 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9070 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9071 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9072 16 * MIN_IOACCEL2_BFT_ENTRY);
9073 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9074 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9075
9076
9077
9078
9079
9080
9081
9082
9083
9084
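	/* If either ioaccel method is supported, use the submission path
	 * that skips the extra readl() after posting each command.
	 */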
9085 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9086 access = SA5_performant_access_no_read;
9087
9088
9089 for (i = 0; i < h->nreply_queues; i++)
9090 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9091
9092 bft[7] = SG_ENTRIES_IN_CMD + 4;
9093 calc_bucket_map(bft, ARRAY_SIZE(bft),
9094 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9095 for (i = 0; i < 8; i++)
9096 writel(bft[i], &h->transtable->BlockFetch[i]);
9097
9098
9099 writel(h->max_commands, &h->transtable->RepQSize);
9100 writel(h->nreply_queues, &h->transtable->RepQCount);
9101 writel(0, &h->transtable->RepQCtrAddrLow32);
9102 writel(0, &h->transtable->RepQCtrAddrHigh32);
9103
9104 for (i = 0; i < h->nreply_queues; i++) {
9105 writel(0, &h->transtable->RepQAddr[i].upper);
9106 writel(h->reply_queue[i].busaddr,
9107 &h->transtable->RepQAddr[i].lower);
9108 }
9109
9110 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9111 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9112
9113
9114
9115 if (trans_support & CFGTBL_Trans_io_accel1) {
9116 access = SA5_ioaccel_mode1_access;
9117 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9118 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9119 } else {
9120 if (trans_support & CFGTBL_Trans_io_accel2) {
9121 access = SA5_ioaccel_mode2_access;
9122 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9123 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9124 }
9125 }
9126 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9127 if (hpsa_wait_for_mode_change_ack(h)) {
9128 dev_err(&h->pdev->dev,
9129 "performant mode problem - doorbell timeout\n");
9130 return -ENODEV;
9131 }
9132 register_value = readl(&(h->cfgtable->TransportActive));
9133 if (!(register_value & CFGTBL_Trans_Performant)) {
9134 dev_err(&h->pdev->dev,
9135 "performant mode problem - transport not active\n");
9136 return -ENODEV;
9137 }
9138
9139 h->access = access;
9140 h->transMethod = transMethod;
9141
9142 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9143 (trans_support & CFGTBL_Trans_io_accel2)))
9144 return 0;
9145
9146 if (trans_support & CFGTBL_Trans_io_accel1) {
9147
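		/* Set up I/O accelerator (mode 1) reply queues. */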
9148 for (i = 0; i < h->nreply_queues; i++) {
9149 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9150 h->reply_queue[i].current_entry =
9151 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9152 }
9153 bft[7] = h->ioaccel_maxsg + 8;
9154 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9155 h->ioaccel1_blockFetchTable);
9156
9157
9158 for (i = 0; i < h->nreply_queues; i++)
9159 memset(h->reply_queue[i].head,
9160 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9161 h->reply_queue_size);
9162
9163
9164
9165
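		/* Pre-fill the constant fields of every ioaccel1 command
		 * block once, so they need not be set per request.
		 */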
9166 for (i = 0; i < h->nr_cmds; i++) {
9167 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9168
9169 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9170 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9171 (i * sizeof(struct ErrorInfo)));
9172 cp->err_info_len = sizeof(struct ErrorInfo);
9173 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9174 cp->host_context_flags =
9175 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9176 cp->timeout_sec = 0;
9177 cp->ReplyQueue = 0;
9178 cp->tag =
9179 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9180 cp->host_addr =
9181 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9182 (i * sizeof(struct io_accel1_cmd)));
9183 }
9184 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9185 u64 cfg_offset, cfg_base_addr_index;
9186 u32 bft2_offset, cfg_base_addr;
9187 int rc;
9188
9189 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9190 &cfg_base_addr_index, &cfg_offset);
9191 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9192 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9193 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9194 4, h->ioaccel2_blockFetchTable);
9195 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9196 BUILD_BUG_ON(offsetof(struct CfgTable,
9197 io_accel_request_size_offset) != 0xb8);
9198 h->ioaccel2_bft2_regs =
9199 remap_pci_mem(pci_resource_start(h->pdev,
9200 cfg_base_addr_index) +
9201 cfg_offset + bft2_offset,
9202 ARRAY_SIZE(bft2) *
9203 sizeof(*h->ioaccel2_bft2_regs));
9204 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9205 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9206 }
9207 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9208 if (hpsa_wait_for_mode_change_ack(h)) {
9209 dev_err(&h->pdev->dev,
9210 "performant mode problem - enabling ioaccel mode\n");
9211 return -ENODEV;
9212 }
9213 return 0;
9214}
9215
9216
9217static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9218{
9219 if (h->ioaccel_cmd_pool) {
9220 pci_free_consistent(h->pdev,
9221 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9222 h->ioaccel_cmd_pool,
9223 h->ioaccel_cmd_pool_dhandle);
9224 h->ioaccel_cmd_pool = NULL;
9225 h->ioaccel_cmd_pool_dhandle = 0;
9226 }
9227 kfree(h->ioaccel1_blockFetchTable);
9228 h->ioaccel1_blockFetchTable = NULL;
9229}
9230
9231
9232static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9233{
9234 h->ioaccel_maxsg =
9235 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9236 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9237 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9238
9239
9240
9241
9242
9243 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9244 IOACCEL1_COMMANDLIST_ALIGNMENT);
9245 h->ioaccel_cmd_pool =
9246 pci_alloc_consistent(h->pdev,
9247 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9248 &(h->ioaccel_cmd_pool_dhandle));
9249
9250 h->ioaccel1_blockFetchTable =
9251 kmalloc(((h->ioaccel_maxsg + 1) *
9252 sizeof(u32)), GFP_KERNEL);
9253
9254 if ((h->ioaccel_cmd_pool == NULL) ||
9255 (h->ioaccel1_blockFetchTable == NULL))
9256 goto clean_up;
9257
9258 memset(h->ioaccel_cmd_pool, 0,
9259 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9260 return 0;
9261
9262clean_up:
9263 hpsa_free_ioaccel1_cmd_and_bft(h);
9264 return -ENOMEM;
9265}
9266
9267
9268static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9269{
9270 hpsa_free_ioaccel2_sg_chain_blocks(h);
9271
9272 if (h->ioaccel2_cmd_pool) {
9273 pci_free_consistent(h->pdev,
9274 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9275 h->ioaccel2_cmd_pool,
9276 h->ioaccel2_cmd_pool_dhandle);
9277 h->ioaccel2_cmd_pool = NULL;
9278 h->ioaccel2_cmd_pool_dhandle = 0;
9279 }
9280 kfree(h->ioaccel2_blockFetchTable);
9281 h->ioaccel2_blockFetchTable = NULL;
9282}
9283
9284
9285static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9286{
9287 int rc;
9288
9289
9290
9291 h->ioaccel_maxsg =
9292 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9293 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9294 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9295
9296 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9297 IOACCEL2_COMMANDLIST_ALIGNMENT);
9298 h->ioaccel2_cmd_pool =
9299 pci_alloc_consistent(h->pdev,
9300 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9301 &(h->ioaccel2_cmd_pool_dhandle));
9302
9303 h->ioaccel2_blockFetchTable =
9304 kmalloc(((h->ioaccel_maxsg + 1) *
9305 sizeof(u32)), GFP_KERNEL);
9306
9307 if ((h->ioaccel2_cmd_pool == NULL) ||
9308 (h->ioaccel2_blockFetchTable == NULL)) {
9309 rc = -ENOMEM;
9310 goto clean_up;
9311 }
9312
9313 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9314 if (rc)
9315 goto clean_up;
9316
9317 memset(h->ioaccel2_cmd_pool, 0,
9318 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9319 return 0;
9320
9321clean_up:
9322 hpsa_free_ioaccel2_cmd_and_bft(h);
9323 return rc;
9324}
9325
9326
9327static void hpsa_free_performant_mode(struct ctlr_info *h)
9328{
9329 kfree(h->blockFetchTable);
9330 h->blockFetchTable = NULL;
9331 hpsa_free_reply_queues(h);
9332 hpsa_free_ioaccel1_cmd_and_bft(h);
9333 hpsa_free_ioaccel2_cmd_and_bft(h);
9334}
9335
9336
9337
9338
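/* Allocate everything needed for performant (and, if supported, ioaccel)
 * mode and switch the controller out of simple mode.
 */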
9339static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9340{
9341 u32 trans_support;
9342 unsigned long transMethod = CFGTBL_Trans_Performant |
9343 CFGTBL_Trans_use_short_tags;
9344 int i, rc;
9345
9346 if (hpsa_simple_mode)
9347 return 0;
9348
9349 trans_support = readl(&(h->cfgtable->TransportSupport));
9350 if (!(trans_support & PERFORMANT_MODE))
9351 return 0;
9352
9353
9354 if (trans_support & CFGTBL_Trans_io_accel1) {
9355 transMethod |= CFGTBL_Trans_io_accel1 |
9356 CFGTBL_Trans_enable_directed_msix;
9357 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9358 if (rc)
9359 return rc;
9360 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9361 transMethod |= CFGTBL_Trans_io_accel2 |
9362 CFGTBL_Trans_enable_directed_msix;
9363 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9364 if (rc)
9365 return rc;
9366 }
9367
9368 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
9369 hpsa_get_max_perf_mode_cmds(h);
9370
9371 h->reply_queue_size = h->max_commands * sizeof(u64);
9372
9373 for (i = 0; i < h->nreply_queues; i++) {
9374 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9375 h->reply_queue_size,
9376 &(h->reply_queue[i].busaddr));
9377 if (!h->reply_queue[i].head) {
9378 rc = -ENOMEM;
9379 goto clean1;
9380 }
9381 h->reply_queue[i].size = h->max_commands;
9382 h->reply_queue[i].wraparound = 1;
9383 h->reply_queue[i].current_entry = 0;
9384 }
9385
9386
9387 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9388 sizeof(u32)), GFP_KERNEL);
9389 if (!h->blockFetchTable) {
9390 rc = -ENOMEM;
9391 goto clean1;
9392 }
9393
9394 rc = hpsa_enter_performant_mode(h, trans_support);
9395 if (rc)
9396 goto clean2;
9397 return 0;
9398
9399clean2:
9400 kfree(h->blockFetchTable);
9401 h->blockFetchTable = NULL;
9402clean1:
9403 hpsa_free_reply_queues(h);
9404 hpsa_free_ioaccel1_cmd_and_bft(h);
9405 hpsa_free_ioaccel2_cmd_and_bft(h);
9406 return rc;
9407}
9408
9409static int is_accelerated_cmd(struct CommandList *c)
9410{
9411 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9412}
9413
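/* Wait (polling every 100 ms) until no accelerated (ioaccel) commands
 * remain outstanding on the controller.
 */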
9414static void hpsa_drain_accel_commands(struct ctlr_info *h)
9415{
9416 struct CommandList *c = NULL;
9417 int i, accel_cmds_out;
9418 int refcount;
9419
9420 do {
9421 accel_cmds_out = 0;
9422 for (i = 0; i < h->nr_cmds; i++) {
9423 c = h->cmd_pool + i;
9424 refcount = atomic_inc_return(&c->refcount);
9425 if (refcount > 1)
9426 accel_cmds_out += is_accelerated_cmd(c);
9427 cmd_free(h, c);
9428 }
9429 if (accel_cmds_out <= 0)
9430 break;
9431 msleep(100);
9432 } while (1);
9433}
9434
9435static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9436 struct hpsa_sas_port *hpsa_sas_port)
9437{
9438 struct hpsa_sas_phy *hpsa_sas_phy;
9439 struct sas_phy *phy;
9440
9441 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9442 if (!hpsa_sas_phy)
9443 return NULL;
9444
9445 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9446 hpsa_sas_port->next_phy_index);
9447 if (!phy) {
9448 kfree(hpsa_sas_phy);
9449 return NULL;
9450 }
9451
9452 hpsa_sas_port->next_phy_index++;
9453 hpsa_sas_phy->phy = phy;
9454 hpsa_sas_phy->parent_port = hpsa_sas_port;
9455
9456 return hpsa_sas_phy;
9457}
9458
9459static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9460{
9461 struct sas_phy *phy = hpsa_sas_phy->phy;
9462
9463 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9464 sas_phy_free(phy);
9465 if (hpsa_sas_phy->added_to_port)
9466 list_del(&hpsa_sas_phy->phy_list_entry);
9467 kfree(hpsa_sas_phy);
9468}
9469
9470static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9471{
9472 int rc;
9473 struct hpsa_sas_port *hpsa_sas_port;
9474 struct sas_phy *phy;
9475 struct sas_identify *identify;
9476
9477 hpsa_sas_port = hpsa_sas_phy->parent_port;
9478 phy = hpsa_sas_phy->phy;
9479
9480 identify = &phy->identify;
9481 memset(identify, 0, sizeof(*identify));
9482 identify->sas_address = hpsa_sas_port->sas_address;
9483 identify->device_type = SAS_END_DEVICE;
9484 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9485 identify->target_port_protocols = SAS_PROTOCOL_STP;
9486 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9487 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9488 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9489 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9490 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9491
9492 rc = sas_phy_add(hpsa_sas_phy->phy);
9493 if (rc)
9494 return rc;
9495
9496 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9497 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9498 &hpsa_sas_port->phy_list_head);
9499 hpsa_sas_phy->added_to_port = true;
9500
9501 return 0;
9502}
9503
9504static int
9505 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9506 struct sas_rphy *rphy)
9507{
9508 struct sas_identify *identify;
9509
9510 identify = &rphy->identify;
9511 identify->sas_address = hpsa_sas_port->sas_address;
9512 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9513 identify->target_port_protocols = SAS_PROTOCOL_STP;
9514
9515 return sas_rphy_add(rphy);
9516}
9517
9518static struct hpsa_sas_port
9519 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9520 u64 sas_address)
9521{
9522 int rc;
9523 struct hpsa_sas_port *hpsa_sas_port;
9524 struct sas_port *port;
9525
9526 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9527 if (!hpsa_sas_port)
9528 return NULL;
9529
9530 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9531 hpsa_sas_port->parent_node = hpsa_sas_node;
9532
9533 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9534 if (!port)
9535 goto free_hpsa_port;
9536
9537 rc = sas_port_add(port);
9538 if (rc)
9539 goto free_sas_port;
9540
9541 hpsa_sas_port->port = port;
9542 hpsa_sas_port->sas_address = sas_address;
9543 list_add_tail(&hpsa_sas_port->port_list_entry,
9544 &hpsa_sas_node->port_list_head);
9545
9546 return hpsa_sas_port;
9547
9548free_sas_port:
9549 sas_port_free(port);
9550free_hpsa_port:
9551 kfree(hpsa_sas_port);
9552
9553 return NULL;
9554}
9555
9556static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9557{
9558 struct hpsa_sas_phy *hpsa_sas_phy;
9559 struct hpsa_sas_phy *next;
9560
9561 list_for_each_entry_safe(hpsa_sas_phy, next,
9562 &hpsa_sas_port->phy_list_head, phy_list_entry)
9563 hpsa_free_sas_phy(hpsa_sas_phy);
9564
9565 sas_port_delete(hpsa_sas_port->port);
9566 list_del(&hpsa_sas_port->port_list_entry);
9567 kfree(hpsa_sas_port);
9568}
9569
9570static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9571{
9572 struct hpsa_sas_node *hpsa_sas_node;
9573
9574 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9575 if (hpsa_sas_node) {
9576 hpsa_sas_node->parent_dev = parent_dev;
9577 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9578 }
9579
9580 return hpsa_sas_node;
9581}
9582
9583static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9584{
9585 struct hpsa_sas_port *hpsa_sas_port;
9586 struct hpsa_sas_port *next;
9587
9588 if (!hpsa_sas_node)
9589 return;
9590
9591 list_for_each_entry_safe(hpsa_sas_port, next,
9592 &hpsa_sas_node->port_list_head, port_list_entry)
9593 hpsa_free_sas_port(hpsa_sas_port);
9594
9595 kfree(hpsa_sas_node);
9596}
9597
9598static struct hpsa_scsi_dev_t
9599 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9600 struct sas_rphy *rphy)
9601{
9602 int i;
9603 struct hpsa_scsi_dev_t *device;
9604
9605 for (i = 0; i < h->ndevices; i++) {
9606 device = h->dev[i];
9607 if (!device->sas_port)
9608 continue;
9609 if (device->sas_port->rphy == rphy)
9610 return device;
9611 }
9612
9613 return NULL;
9614}
9615
9616static int hpsa_add_sas_host(struct ctlr_info *h)
9617{
9618 int rc;
9619 struct device *parent_dev;
9620 struct hpsa_sas_node *hpsa_sas_node;
9621 struct hpsa_sas_port *hpsa_sas_port;
9622 struct hpsa_sas_phy *hpsa_sas_phy;
9623
9624 parent_dev = &h->scsi_host->shost_gendev;
9625
9626 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9627 if (!hpsa_sas_node)
9628 return -ENOMEM;
9629
9630 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9631 if (!hpsa_sas_port) {
9632 rc = -ENODEV;
9633 goto free_sas_node;
9634 }
9635
9636 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9637 if (!hpsa_sas_phy) {
9638 rc = -ENODEV;
9639 goto free_sas_port;
9640 }
9641
9642 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9643 if (rc)
9644 goto free_sas_phy;
9645
9646 h->sas_host = hpsa_sas_node;
9647
9648 return 0;
9649
9650free_sas_phy:
9651 hpsa_free_sas_phy(hpsa_sas_phy);
9652free_sas_port:
9653 hpsa_free_sas_port(hpsa_sas_port);
9654free_sas_node:
9655 hpsa_free_sas_node(hpsa_sas_node);
9656
9657 return rc;
9658}
9659
9660static void hpsa_delete_sas_host(struct ctlr_info *h)
9661{
9662 hpsa_free_sas_node(h->sas_host);
9663}
9664
9665static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9666 struct hpsa_scsi_dev_t *device)
9667{
9668 int rc;
9669 struct hpsa_sas_port *hpsa_sas_port;
9670 struct sas_rphy *rphy;
9671
9672 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9673 if (!hpsa_sas_port)
9674 return -ENOMEM;
9675
9676 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9677 if (!rphy) {
9678 rc = -ENODEV;
9679 goto free_sas_port;
9680 }
9681
9682 hpsa_sas_port->rphy = rphy;
9683 device->sas_port = hpsa_sas_port;
9684
9685 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9686 if (rc)
9687 goto free_sas_port;
9688
9689 return 0;
9690
9691free_sas_port:
9692 hpsa_free_sas_port(hpsa_sas_port);
9693 device->sas_port = NULL;
9694
9695 return rc;
9696}
9697
9698static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9699{
9700 if (device->sas_port) {
9701 hpsa_free_sas_port(device->sas_port);
9702 device->sas_port = NULL;
9703 }
9704}
9705
9706static int
9707hpsa_sas_get_linkerrors(struct sas_phy *phy)
9708{
9709 return 0;
9710}
9711
9712static int
9713hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9714{
9715 *identifier = 0;
9716 return 0;
9717}
9718
9719static int
9720hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9721{
9722 return -ENXIO;
9723}
9724
9725static int
9726hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9727{
9728 return 0;
9729}
9730
9731static int
9732hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9733{
9734 return 0;
9735}
9736
9737static int
9738hpsa_sas_phy_setup(struct sas_phy *phy)
9739{
9740 return 0;
9741}
9742
9743static void
9744hpsa_sas_phy_release(struct sas_phy *phy)
9745{
9746}
9747
9748static int
9749hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9750{
9751 return -EINVAL;
9752}
9753
9754
9755static int
9756hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
9757struct request *req)
9758{
9759 return -EINVAL;
9760}
9761
9762static struct sas_function_template hpsa_sas_transport_functions = {
9763 .get_linkerrors = hpsa_sas_get_linkerrors,
9764 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9765 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9766 .phy_reset = hpsa_sas_phy_reset,
9767 .phy_enable = hpsa_sas_phy_enable,
9768 .phy_setup = hpsa_sas_phy_setup,
9769 .phy_release = hpsa_sas_phy_release,
9770 .set_phy_speed = hpsa_sas_phy_speed,
9771 .smp_handler = hpsa_sas_smp_handler,
9772};
9773
9774
9775
9776
9777
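/* Module entry point: register the SAS transport template and the PCI
 * driver; unwind the transport attach if PCI registration fails.
 */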
9778static int __init hpsa_init(void)
9779{
9780 int rc;
9781
9782 hpsa_sas_transport_template =
9783 sas_attach_transport(&hpsa_sas_transport_functions);
9784 if (!hpsa_sas_transport_template)
9785 return -ENODEV;
9786
9787 rc = pci_register_driver(&hpsa_pci_driver);
9788
9789 if (rc)
9790 sas_release_transport(hpsa_sas_transport_template);
9791
9792 return rc;
9793}
9794
9795static void __exit hpsa_cleanup(void)
9796{
9797 pci_unregister_driver(&hpsa_pci_driver);
9798 sas_release_transport(hpsa_sas_transport_template);
9799}
9800
9801static void __attribute__((unused)) verify_offsets(void)
9802{
9803#define VERIFY_OFFSET(member, offset) \
9804 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9805
9806 VERIFY_OFFSET(structure_size, 0);
9807 VERIFY_OFFSET(volume_blk_size, 4);
9808 VERIFY_OFFSET(volume_blk_cnt, 8);
9809 VERIFY_OFFSET(phys_blk_shift, 16);
9810 VERIFY_OFFSET(parity_rotation_shift, 17);
9811 VERIFY_OFFSET(strip_size, 18);
9812 VERIFY_OFFSET(disk_starting_blk, 20);
9813 VERIFY_OFFSET(disk_blk_cnt, 28);
9814 VERIFY_OFFSET(data_disks_per_row, 36);
9815 VERIFY_OFFSET(metadata_disks_per_row, 38);
9816 VERIFY_OFFSET(row_cnt, 40);
9817 VERIFY_OFFSET(layout_map_count, 42);
9818 VERIFY_OFFSET(flags, 44);
9819 VERIFY_OFFSET(dekindex, 46);
9820
9821 VERIFY_OFFSET(data, 64);
9822
9823#undef VERIFY_OFFSET
9824
9825#define VERIFY_OFFSET(member, offset) \
9826 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9827
9828 VERIFY_OFFSET(IU_type, 0);
9829 VERIFY_OFFSET(direction, 1);
9830 VERIFY_OFFSET(reply_queue, 2);
9831
9832 VERIFY_OFFSET(scsi_nexus, 4);
9833 VERIFY_OFFSET(Tag, 8);
9834 VERIFY_OFFSET(cdb, 16);
9835 VERIFY_OFFSET(cciss_lun, 32);
9836 VERIFY_OFFSET(data_len, 40);
9837 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9838 VERIFY_OFFSET(sg_count, 45);
9839
9840 VERIFY_OFFSET(err_ptr, 48);
9841 VERIFY_OFFSET(err_len, 56);
9842
9843 VERIFY_OFFSET(sg, 64);
9844
9845#undef VERIFY_OFFSET
9846
9847#define VERIFY_OFFSET(member, offset) \
9848 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9849
9850 VERIFY_OFFSET(dev_handle, 0x00);
9851 VERIFY_OFFSET(reserved1, 0x02);
9852 VERIFY_OFFSET(function, 0x03);
9853 VERIFY_OFFSET(reserved2, 0x04);
9854 VERIFY_OFFSET(err_info, 0x0C);
9855 VERIFY_OFFSET(reserved3, 0x10);
9856 VERIFY_OFFSET(err_info_len, 0x12);
9857 VERIFY_OFFSET(reserved4, 0x13);
9858 VERIFY_OFFSET(sgl_offset, 0x14);
9859 VERIFY_OFFSET(reserved5, 0x15);
9860 VERIFY_OFFSET(transfer_len, 0x1C);
9861 VERIFY_OFFSET(reserved6, 0x20);
9862 VERIFY_OFFSET(io_flags, 0x24);
9863 VERIFY_OFFSET(reserved7, 0x26);
9864 VERIFY_OFFSET(LUN, 0x34);
9865 VERIFY_OFFSET(control, 0x3C);
9866 VERIFY_OFFSET(CDB, 0x40);
9867 VERIFY_OFFSET(reserved8, 0x50);
9868 VERIFY_OFFSET(host_context_flags, 0x60);
9869 VERIFY_OFFSET(timeout_sec, 0x62);
9870 VERIFY_OFFSET(ReplyQueue, 0x64);
9871 VERIFY_OFFSET(reserved9, 0x65);
9872 VERIFY_OFFSET(tag, 0x68);
9873 VERIFY_OFFSET(host_addr, 0x70);
9874 VERIFY_OFFSET(CISS_LUN, 0x78);
9875 VERIFY_OFFSET(SG, 0x78 + 8);
9876#undef VERIFY_OFFSET
9877}
9878
9879module_init(hpsa_init);
9880module_exit(hpsa_cleanup);
9881